diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml index 6d4a22921b1..318a7869d45 100644 --- a/.github/workflows/post-commit.yml +++ b/.github/workflows/post-commit.yml @@ -125,6 +125,12 @@ jobs: steps: - name: Checkout project uses: actions/checkout@v2 + if: matrix.check != 'bats' + - name: Checkout project with history + uses: actions/checkout@v2 + with: + fetch-depth: 0 + if: matrix.check == 'bats' - name: Cache for maven dependencies uses: actions/cache@v2 with: diff --git a/.gitignore b/.gitignore index a302cc04edb..1ec550bb990 100644 --- a/.gitignore +++ b/.gitignore @@ -68,6 +68,7 @@ hadoop-ozone/recon/node_modules .mvn .dev-tools - +dev-support/ci/bats-assert +dev-support/ci/bats-support hadoop-ozone/dist/src/main/license/current.txt diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f467c80a706..af9958941fa 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -20,7 +20,8 @@ We welcome contributions of: * Unit Tests (JUnit / Java) * Acceptance Tests (docker + robot framework) * Blockade tests (python + blockade) - * Performance: We have multiple type of load generator / benchmark tools (`ozone freon`, `ozone genesis`), which can be used to test cluster and report problems. + * Performance: We have multiple type of load generator / benchmark tools (`ozone freon`), + which can be used to test cluster and report problems. * **Bug reports** pointing out broken functionality, docs, or suggestions for improvements are always welcome! ## Who To Contact diff --git a/dev-support/ci/selective_ci_checks.bats b/dev-support/ci/selective_ci_checks.bats index 7e2dff321ef..809312308ed 100644 --- a/dev-support/ci/selective_ci_checks.bats +++ b/dev-support/ci/selective_ci_checks.bats @@ -66,6 +66,17 @@ load bats-assert/load.bash assert_output -p needs-kubernetes-tests=true } +@test "runner image update" { + run dev-support/ci/selective_ci_checks.sh b95eeba82a + + assert_output -p 'basic-checks=["rat"]' + assert_output -p needs-build=true + assert_output -p needs-compose-tests=true + assert_output -p needs-dependency-check=true + assert_output -p needs-integration-tests=false + assert_output -p needs-kubernetes-tests=true +} + @test "check script" { run dev-support/ci/selective_ci_checks.sh 316899152 @@ -77,10 +88,21 @@ load bats-assert/load.bash assert_output -p needs-kubernetes-tests=true } +@test "integration and unit" { + run dev-support/ci/selective_ci_checks.sh 9aebf6e25 + + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p needs-build=false + assert_output -p needs-compose-tests=false + assert_output -p needs-dependency-check=false + assert_output -p needs-integration-tests=true + assert_output -p needs-kubernetes-tests=false +} + @test "integration only" { run dev-support/ci/selective_ci_checks.sh 61396ba9f - assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs","unit"]' + assert_output -p 'basic-checks=["rat","author","checkstyle","findbugs"]' assert_output -p needs-build=false assert_output -p needs-compose-tests=false assert_output -p needs-dependency-check=false diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh index 4a490cd56a4..3989afe36b7 100755 --- a/dev-support/ci/selective_ci_checks.sh +++ b/dev-support/ci/selective_ci_checks.sh @@ -219,7 +219,10 @@ function get_count_compose_files() { start_end::group_start "Count compose files" local pattern_array=( "^hadoop-ozone/dev-support/checks/acceptance.sh" - "^hadoop-ozone/dist/src/main/compose" + 
"^hadoop-ozone/dist" + ) + local ignore_array=( + "^hadoop-ozone/dist/src/main/k8s" ) filter_changed_files true COUNT_COMPOSE_CHANGED_FILES=${match_count} @@ -258,7 +261,10 @@ function get_count_kubernetes_files() { start_end::group_start "Count kubernetes files" local pattern_array=( "^hadoop-ozone/dev-support/checks/kubernetes.sh" - "^hadoop-ozone/dist/src/main/k8s" + "^hadoop-ozone/dist" + ) + local ignore_array=( + "^hadoop-ozone/dist/src/main/compose" ) filter_changed_files true COUNT_KUBERNETES_CHANGED_FILES=${match_count} @@ -332,6 +338,9 @@ function check_needs_checkstyle() { "pom.xml" "src/..../java" ) + local ignore_array=( + "^hadoop-ozone/dist" + ) filter_changed_files if [[ ${match_count} != "0" ]]; then @@ -373,6 +382,9 @@ function check_needs_findbugs() { "pom.xml" "src/..../java" ) + local ignore_array=( + "^hadoop-ozone/dist" + ) filter_changed_files if [[ ${match_count} != "0" ]]; then @@ -391,6 +403,11 @@ function check_needs_unit_test() { "src/..../java" "src/..../resources" ) + local ignore_array=( + "^hadoop-ozone/dist" + "^hadoop-ozone/fault-injection-test/mini-chaos-tests" + "^hadoop-ozone/integration-test" + ) filter_changed_files if [[ ${match_count} != "0" ]]; then diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index f19853c1aa4..a2684955460 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -159,7 +159,7 @@ public void connect(String encodedToken) throws Exception { private synchronized void connectToDatanode(DatanodeDetails dn) throws IOException { - if (isConnected(dn)){ + if (isConnected(dn)) { return; } // read port from the data node, on failure use default configured @@ -269,10 +269,10 @@ public ContainerCommandResponseProto sendCommand( Thread.currentThread().interrupt(); } } - try{ + try { for (Map.Entry > - entry : futureHashMap.entrySet()){ + entry : futureHashMap.entrySet()) { responseProtoHashMap.put(entry.getKey(), entry.getValue().get()); } } catch (InterruptedException e) { @@ -538,7 +538,7 @@ public void onCompleted() { } private synchronized void checkOpen(DatanodeDetails dn) - throws IOException{ + throws IOException { if (closed) { throw new IOException("This channel is not connected."); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java index 6b74adb07f8..07fd0a8c2d4 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java @@ -206,8 +206,8 @@ public static void verifyKeyName(String keyName) { if (keyName == null) { throw new IllegalArgumentException("Key name is null"); } - if(!OzoneConsts.KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX - .matcher(keyName).matches()){ + if (!OzoneConsts.KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX + .matcher(keyName).matches()) { throw new IllegalArgumentException("Invalid key name: " + keyName); } } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java index 7475db27d01..bd97cf248da 100644 --- 
a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockInputStream.java @@ -152,7 +152,7 @@ public synchronized void initialize() throws IOException { // retry according to retry policy. chunks = getChunkInfos(); break; - } catch(SCMSecurityException ex) { + } catch (SCMSecurityException ex) { throw ex; } catch (StorageContainerException ex) { refreshPipeline(ex); @@ -340,9 +340,9 @@ synchronized int readWithStrategy(ByteReaderStrategy strategy) throws } else { throw e; } - } catch(SCMSecurityException ex) { + } catch (SCMSecurityException ex) { throw ex; - } catch(IOException ex) { + } catch (IOException ex) { // We got a IOException which might be due // to DN down or connectivity issue. if (shouldRetryRead(ex)) { @@ -512,7 +512,7 @@ synchronized long getBlockPosition() { } @Override - public void unbuffer() { + public synchronized void unbuffer() { storePosition(); releaseClient(); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java index 859d8080e6a..8b3f817a2e4 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java @@ -487,7 +487,7 @@ public void flush() throws IOException { } catch (Throwable e) { String msg = "Failed to flush. error: " + e.getMessage(); LOG.error(msg, e); - throw new RuntimeException(msg, e); + throw e; } } } @@ -553,7 +553,7 @@ public void close() throws IOException { } catch (Throwable e) { String msg = "Failed to flush. error: " + e.getMessage(); LOG.error(msg, e); - throw new RuntimeException(msg, e); + throw e; } finally { cleanup(false); } @@ -708,7 +708,7 @@ private void handleInterruptedException(Exception ex, boolean processExecutionException) throws IOException { LOG.error("Command execution was interrupted."); - if(processExecutionException) { + if (processExecutionException) { handleExecutionException(ex); } else { throw new IOException(EXCEPTION_MSG + ex.toString(), ex); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java index 94fa87a71e2..a520f8a6a5a 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BufferPool.java @@ -46,7 +46,7 @@ public BufferPool(int bufferSize, int capacity) { } public BufferPool(int bufferSize, int capacity, - Function byteStringConversion){ + Function byteStringConversion) { this.capacity = capacity; this.bufferSize = bufferSize; bufferList = new ArrayList<>(capacity); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java index 7238f2a2a06..802adc11f5e 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/RatisBlockOutputStream.java @@ -97,7 +97,7 @@ void releaseBuffersOnException() { @Override XceiverClientReply sendWatchForCommit(boolean bufferFull) throws IOException { - return bufferFull? 
commitWatcher.watchOnFirstIndex() + return bufferFull ? commitWatcher.watchOnFirstIndex() : commitWatcher.watchOnLastIndex(); } diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 8f93610111a..59003274920 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -166,6 +166,21 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> junit-jupiter-api test + + org.junit.jupiter + junit-jupiter-engine + test + + + org.junit.vintage + junit-vintage-engine + test + + + org.junit.platform + junit-platform-launcher + test + io.jaegertracing jaeger-client diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java index f538595db50..d1e3c192824 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java @@ -151,7 +151,7 @@ public final class HddsConfigKeys { */ public static final String HDDS_X509_MAX_DURATION = "hdds.x509.max.duration"; // Limit Certificate duration to a max value of 5 years. - public static final String HDDS_X509_MAX_DURATION_DEFAULT= "P1865D"; + public static final String HDDS_X509_MAX_DURATION_DEFAULT = "P1865D"; public static final String HDDS_X509_SIGNATURE_ALGO = "hdds.x509.signature.algorithm"; public static final String HDDS_X509_SIGNATURE_ALGO_DEFAULT = "SHA256withRSA"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index 5abe8fbb318..ffbb3e33401 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -632,7 +632,7 @@ public static long getShutDownTimeOut(ConfigurationSource conf) { * Utility method to round up bytes into the nearest MB. */ public static int roundupMb(long bytes) { - return (int)Math.ceil((double) bytes/(double) ONE_MB); + return (int)Math.ceil((double) bytes / (double) ONE_MB); } /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java index 77d193035f5..792a9d0d840 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java @@ -151,6 +151,6 @@ public static String createStartupShutdownMessage(VersionInfo versionInfo, public static String appendIfNotPresent(String str, char c) { Preconditions.checkNotNull(str, "Input string is null"); - return str.isEmpty() || str.charAt(str.length() - 1) != c ? str + c: str; + return str.isEmpty() || str.charAt(str.length() - 1) != c ? str + c : str; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java index 37da0a3b270..03dc00518d2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/annotation/InterfaceAudience.java @@ -50,7 +50,7 @@ public final class InterfaceAudience { */ @Documented @Retention(RetentionPolicy.RUNTIME) - public @interface Public {}; + public @interface Public { }; /** * Intended only for the project(s) specified in the annotation. 
@@ -67,7 +67,7 @@ public final class InterfaceAudience { */ @Documented @Retention(RetentionPolicy.RUNTIME) - public @interface Private {}; + public @interface Private { }; - private InterfaceAudience() {} // Audience can't exist on its own + private InterfaceAudience() { } // Audience can't exist on its own } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java index 9945690a9b0..794ebd2d740 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneQuota.java @@ -43,7 +43,7 @@ public final class OzoneQuota { public static final String OZONE_QUOTA_TB = "TB"; /** Quota Units.*/ - public enum Units {B, KB, MB, GB, TB} + public enum Units { B, KB, MB, GB, TB } // Quota to decide how many buckets can be created. private long quotaInNamespace; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java index 205cca1100c..5403469fa76 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/QuotaList.java @@ -28,13 +28,14 @@ public class QuotaList { private ArrayList unitQuota; private ArrayList sizeQuota; - public QuotaList(){ + public QuotaList() { ozoneQuota = new ArrayList(); unitQuota = new ArrayList(); sizeQuota = new ArrayList(); } - public void addQuotaList(String oQuota, OzoneQuota.Units uQuota, Long sQuota){ + public void addQuotaList( + String oQuota, OzoneQuota.Units uQuota, Long sQuota) { ozoneQuota.add(oQuota); unitQuota.add(uQuota); sizeQuota.add(sQuota); @@ -52,15 +53,15 @@ public ArrayList getUnitQuotaArray() { return this.unitQuota; } - public OzoneQuota.Units getUnits(String oQuota){ + public OzoneQuota.Units getUnits(String oQuota) { return unitQuota.get(ozoneQuota.indexOf(oQuota)); } - public Long getQuotaSize(OzoneQuota.Units uQuota){ + public Long getQuotaSize(OzoneQuota.Units uQuota) { return sizeQuota.get(unitQuota.indexOf(uQuota)); } - public OzoneQuota.Units getQuotaUnit(Long sQuota){ + public OzoneQuota.Units getQuotaUnit(Long sQuota) { return unitQuota.get(sizeQuota.indexOf(sQuota)); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java index 044bd6f8334..8623a0e7f34 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationFactor.java @@ -46,7 +46,7 @@ public enum ReplicationFactor { * @return ReplicationFactor */ public static ReplicationFactor valueOf(int value) { - if(value == 1) { + if (value == 1) { return ONE; } if (value == 3) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index d24cb68d507..6b7e7c63848 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -109,6 +109,7 @@ public OzoneConfiguration(Configuration conf) { setClassLoader(conf.getClassLoader()); if (!(conf instanceof OzoneConfiguration)) { 
loadDefaults(); + addResource(conf); } } @@ -126,6 +127,7 @@ private void loadDefaults() { } catch (IOException e) { e.printStackTrace(); } + addResource("ozone-default.xml"); // Adding core-site here because properties from core-site are // distributed to executors by spark driver. Ozone properties which are // added to core-site, will be overridden by properties from adding Resource @@ -242,7 +244,6 @@ public static void activate() { // adds the default resources Configuration.addDefaultResource("hdfs-default.xml"); Configuration.addDefaultResource("hdfs-site.xml"); - Configuration.addDefaultResource("ozone-default.xml"); } /** @@ -295,7 +296,7 @@ public Map getPropsWithPrefix(String confPrefix) { return configMap; } - private static void addDeprecatedKeys(){ + private static void addDeprecatedKeys() { Configuration.addDeprecations(new DeprecationDelta[]{ new DeprecationDelta("ozone.datanode.pipeline.limit", ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT), @@ -304,7 +305,9 @@ private static void addDeprecatedKeys(){ HDDS_DATANODE_RATIS_PREFIX_KEY + "." + RaftServerConfigKeys.PREFIX + "." + "rpc.slowness.timeout"), new DeprecationDelta("dfs.datanode.keytab.file", - DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY) + DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_KEYTAB_FILE_KEY), + new DeprecationDelta("ozone.scm.chunk.layout", + ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY) }); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java index 782a3e18a43..319fefdf4e7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java @@ -127,7 +127,7 @@ private void loadInitialValue() { private void refresh() { //only one `refresh` can be running at a certain moment - if(isRefreshRunning.compareAndSet(false, true)) { + if (isRefreshRunning.compareAndSet(false, true)) { try { cachedValue.set(source.getUsedSpace()); } catch (RuntimeException e) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java index aef3c298afc..01bd0f482af 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java @@ -713,7 +713,7 @@ public Builder setSetupTime(long time) { * * @return DatanodeDetails.Builder */ - public Builder setPersistedOpState(HddsProtos.NodeOperationalState state){ + public Builder setPersistedOpState(HddsProtos.NodeOperationalState state) { this.persistedOpState = state; return this; } @@ -726,7 +726,7 @@ public Builder setPersistedOpState(HddsProtos.NodeOperationalState state){ * * @return DatanodeDetails.Builder */ - public Builder setPersistedOpStateExpiry(long expiry){ + public Builder setPersistedOpStateExpiry(long expiry) { this.persistedOpStateExpiryEpochSec = expiry; return this; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index 50480c1dcaf..c1cd865036f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -136,7 
+136,7 @@ private static RaftGroup emptyRaftGroup() { } private static RaftGroup newRaftGroup(Collection peers) { - return peers.isEmpty()? emptyRaftGroup() + return peers.isEmpty() ? emptyRaftGroup() : RaftGroup.valueOf(DUMMY_GROUP_ID, peers); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java index c91a186b35f..d72e27a18ae 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfig.java @@ -40,7 +40,7 @@ public class ReconConfig { type = ConfigType.STRING, defaultValue = "", tags = { ConfigTag.SECURITY, ConfigTag.RECON, ConfigTag.OZONE }, - description = "The keytab file used by Recon daemon to login as "+ + description = "The keytab file used by Recon daemon to login as " + "its service principal." ) private String keytab; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java index b5f6e481211..14a229b5e7a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java @@ -32,7 +32,7 @@ * Ozone configuration. */ public final class ByteStringConversion { - private ByteStringConversion(){} // no instantiation. + private ByteStringConversion() { } // no instantiation. /** * Creates the conversion function to be used to convert ByteBuffers to diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java index baee0384fb4..ce79ec2abbf 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfig.java @@ -43,7 +43,7 @@ public class ScmConfig { type = ConfigType.STRING, defaultValue = "", tags = { ConfigTag.SECURITY, ConfigTag.OZONE }, - description = "The keytab file used by SCM daemon to login as "+ + description = "The keytab file used by SCM daemon to login as " + "its service principal." 
) private String keytab; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index a4c314fc332..c1f43c6eb53 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -138,8 +138,8 @@ public final class ScmConfigKeys { public static final String OZONE_CHUNK_READ_BUFFER_DEFAULT_SIZE_DEFAULT = "64KB"; - public static final String OZONE_SCM_CHUNK_LAYOUT_KEY = - "ozone.scm.chunk.layout"; + public static final String OZONE_SCM_CONTAINER_LAYOUT_KEY = + "ozone.scm.container.layout"; public static final String OZONE_SCM_CLIENT_PORT_KEY = "ozone.scm.client.port"; @@ -435,7 +435,7 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_HA_ENABLE_KEY = "ozone.scm.ratis.enable"; public static final boolean OZONE_SCM_HA_ENABLE_DEFAULT - = false; + = true; public static final String OZONE_SCM_RATIS_PORT_KEY = "ozone.scm.ratis.port"; public static final int OZONE_SCM_RATIS_PORT_DEFAULT @@ -525,7 +525,7 @@ public final class ScmConfigKeys { public static final String OZONE_SCM_HA_RAFT_LOG_PURGE_GAP = "ozone.scm.ha.ratis.log.purge.gap"; - public static final int OZONE_SCM_HA_RAFT_LOG_PURGE_GAP_DEFAULT =1000000; + public static final int OZONE_SCM_HA_RAFT_LOG_PURGE_GAP_DEFAULT = 1000000; public static final String OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD = "ozone.scm.ha.ratis.snapshot.threshold"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index 351870a3cd1..f1885f890e1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -20,6 +20,8 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.scm.DatanodeAdminError; +import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -70,6 +72,16 @@ public interface ScmClient extends Closeable { ContainerWithPipeline getContainerWithPipeline(long containerId) throws IOException; + /** + * Gets the list of ReplicaInfo known by SCM for a given container. + * @param containerId - The Container ID + * @return List of ContainerReplicaInfo for the container or an empty list + * if none. + * @throws IOException + */ + List getContainerReplicas( + long containerId) throws IOException; + /** * Close a container. * @@ -308,12 +320,20 @@ Map> getSafeModeRuleStatuses() */ boolean getReplicationManagerStatus() throws IOException; + /** + * Returns the latest container summary report generated by Replication + * Manager. + * @return The latest ReplicationManagerReport. + * @throws IOException + */ + ReplicationManagerReport getReplicationManagerReport() throws IOException; + /** * Start ContainerBalancer. 
*/ boolean startContainerBalancer(Optional threshold, - Optional idleiterations, - Optional maxDatanodesRatioToInvolvePerIteration, + Optional iterations, + Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, Optional maxSizeLeavingSourceInGB) throws IOException; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaInfo.java new file mode 100644 index 00000000000..b30dff716d2 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaInfo.java @@ -0,0 +1,129 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.container; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import java.util.UUID; + +/** + * Class which stores ContainerReplica details on the client. + */ +public final class ContainerReplicaInfo { + + private long containerID; + private String state; + private DatanodeDetails datanodeDetails; + private UUID placeOfBirth; + private long sequenceId; + private long keyCount; + private long bytesUsed; + + public static ContainerReplicaInfo fromProto( + HddsProtos.SCMContainerReplicaProto proto) { + ContainerReplicaInfo.Builder builder = new ContainerReplicaInfo.Builder(); + builder.setContainerID(proto.getContainerID()) + .setState(proto.getState()) + .setDatanodeDetails(DatanodeDetails + .getFromProtoBuf(proto.getDatanodeDetails())) + .setPlaceOfBirth(UUID.fromString(proto.getPlaceOfBirth())) + .setSequenceId(proto.getSequenceID()) + .setKeyCount(proto.getKeyCount()) + .setBytesUsed(proto.getBytesUsed()); + return builder.build(); + } + + private ContainerReplicaInfo() { + } + + public long getContainerID() { + return containerID; + } + + public String getState() { + return state; + } + + public DatanodeDetails getDatanodeDetails() { + return datanodeDetails; + } + + public UUID getPlaceOfBirth() { + return placeOfBirth; + } + + public long getSequenceId() { + return sequenceId; + } + + public long getKeyCount() { + return keyCount; + } + + public long getBytesUsed() { + return bytesUsed; + } + + /** + * Builder for ContainerReplicaInfo class. 
+ */ + public static class Builder { + + private final ContainerReplicaInfo subject = new ContainerReplicaInfo(); + + public Builder setContainerID(long containerID) { + subject.containerID = containerID; + return this; + } + + public Builder setState(String state) { + subject.state = state; + return this; + } + + public Builder setDatanodeDetails(DatanodeDetails datanodeDetails) { + subject.datanodeDetails = datanodeDetails; + return this; + } + + public Builder setPlaceOfBirth(UUID placeOfBirth) { + subject.placeOfBirth = placeOfBirth; + return this; + } + + public Builder setSequenceId(long sequenceId) { + subject.sequenceId = sequenceId; + return this; + } + + public Builder setKeyCount(long keyCount) { + subject.keyCount = keyCount; + return this; + } + + public Builder setBytesUsed(long bytesUsed) { + subject.bytesUsed = bytesUsed; + return this; + } + + public ContainerReplicaInfo build() { + return subject; + } + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java new file mode 100644 index 00000000000..2f2a7bf3e5e --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManagerReport.java @@ -0,0 +1,283 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.container; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.LongAdder; +import java.util.stream.Collectors; + +/** + * This class is used by ReplicationManager. Each time ReplicationManager runs, + * it creates a new instance of this class and increments the various counters + * to allow for creating a report on the various container states within the + * system. There is a counter for each LifeCycleState (open, closing, closed + * etc.) and the sum of each of the lifecycle state counters should equal the + * total number of containers in SCM. I.e., each container can only be in one of + * the Lifecycle states at any time. + * + * Additionally, there is a set of counters for the "health state" of the + * containers, defined here in the HealthState enum. It is normal for containers + * to be in these health states from time to time, but the presence of a + * container in one of these health states generally means the cluster is in a + * degraded state. Normally, the cluster will recover by itself, but manual + * intervention may be needed in some cases. + * + * To aid debugging, when containers are in one of the health states, a list of + * up to SAMPLE_LIMIT container IDs is recorded in the report for each of the + * states. + */ +public class ReplicationManagerReport { + + public static final int SAMPLE_LIMIT = 100; + private long reportTimeStamp; + + /** + * Enum representing various health states a container can be in. 
+ */ + public enum HealthState { + UNDER_REPLICATED("Containers with insufficient replicas", + "NumUnderReplicatedContainers"), + MIS_REPLICATED("Containers with insufficient racks", + "NumMisReplicatedContainers"), + OVER_REPLICATED("Containers with more replicas than required", + "NumOverReplicatedContainers"), + MISSING("Containers with no online replicas", + "NumMissingContainers"), + UNHEALTHY( + "Containers Closed or Quasi_Closed having some replicas in " + + "a different state", "NumUnhealthyContainers"), + EMPTY("Containers having no blocks", "NumEmptyContainers"), + OPEN_UNHEALTHY( + "Containers open and having replicas with different states", + "NumOpenUnhealthyContainers"), + QUASI_CLOSED_STUCK( + "Containers QuasiClosed with insufficient datanode origins", + "NumStuckQuasiClosedContainers"); + + private String description; + private String metricName; + + HealthState(String desc, String name) { + this.description = desc; + this.metricName = name; + } + + public String getMetricName() { + return this.metricName; + } + + public String getDescription() { + return this.description; + } + } + + private final Map stats; + private final Map> containerSample + = new ConcurrentHashMap<>(); + + public static ReplicationManagerReport fromProtobuf( + HddsProtos.ReplicationManagerReportProto proto) { + ReplicationManagerReport report = new ReplicationManagerReport(); + report.setTimestamp(proto.getTimestamp()); + for (HddsProtos.KeyIntValue stat : proto.getStatList()) { + report.setStat(stat.getKey(), stat.getValue()); + } + for (HddsProtos.KeyContainerIDList sample : proto.getStatSampleList()) { + report.setSample(sample.getKey(), sample.getContainerList() + .stream() + .map(c -> ContainerID.getFromProtobuf(c)) + .collect(Collectors.toList())); + } + return report; + } + + public ReplicationManagerReport() { + stats = createStatsMap(); + } + + public void increment(HealthState stat) { + increment(stat.toString()); + } + + public void increment(HddsProtos.LifeCycleState stat) { + increment(stat.toString()); + } + + public void incrementAndSample(HealthState stat, ContainerID container) { + incrementAndSample(stat.toString(), container); + } + + public void incrementAndSample(HddsProtos.LifeCycleState stat, + ContainerID container) { + incrementAndSample(stat.toString(), container); + } + + public void setComplete() { + reportTimeStamp = System.currentTimeMillis(); + } + + /** + * The epoch time in milli-seconds when this report was completed. + * @return epoch time in milli-seconds. + */ + public long getReportTimeStamp() { + return reportTimeStamp; + } + + /** + * Get the stat for the given LifeCycleState. If there is no stat available + * for that stat -1 is returned. + * @param stat The requested stat. + * @return The stat value or -1 if it is not present + */ + public long getStat(HddsProtos.LifeCycleState stat) { + return getStat(stat.toString()); + } + + /** + * Get the stat for the given HealthState. If there is no stat available + * for that stat -1 is returned. + * @param stat The requested stat. + * @return The stat value or -1 if it is not present + */ + public long getStat(HealthState stat) { + return getStat(stat.toString()); + } + + /** + * Returns the stat requested, or -1 if it does not exist. + * @param stat The request stat + * @return The value of the stat or -1 if it does not exist. 
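For orientation (not part of the patch): the counters above are meant to be driven once per ReplicationManager run and then read back by report consumers. A minimal sketch of that flow, using only methods defined in this class; the ContainerID factory call is an assumption:

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;

/** Sketch only: illustrates the intended increment/read cycle. */
final class ReportUsageExample {
  static ReplicationManagerReport buildSampleReport() {
    ReplicationManagerReport report = new ReplicationManagerReport();
    // One lifecycle-state increment per container scanned in this run.
    report.increment(HddsProtos.LifeCycleState.CLOSED);
    // Health-state problems also record up to SAMPLE_LIMIT offending container IDs.
    report.incrementAndSample(
        ReplicationManagerReport.HealthState.UNDER_REPLICATED,
        ContainerID.valueOf(1L));  // assumed factory method; any ContainerID works
    report.setComplete();          // stamps the completion time
    // Consumers read the counters (and samples) back, e.g. for metrics or CLI output.
    long underReplicated =
        report.getStat(ReplicationManagerReport.HealthState.UNDER_REPLICATED);
    assert underReplicated == 1;
    return report;
  }
}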
+ */ + private long getStat(String stat) { + LongAdder val = stats.get(stat); + if (val == null) { + return -1; + } + return val.longValue(); + } + + protected void setTimestamp(long timestamp) { + this.reportTimeStamp = timestamp; + } + + protected void setStat(String stat, long value) { + LongAdder adder = getStatAndEnsurePresent(stat); + if (adder.longValue() != 0) { + throw new IllegalStateException(stat + " is expected to be zero"); + } + adder.add(value); + } + + protected void setSample(String stat, List sample) { + // First get the stat, as we should not receive a sample for a stat which + // does not exist. + getStatAndEnsurePresent(stat); + // Now check there is not already a sample for this stat + List existingSample = containerSample.get(stat); + if (existingSample != null) { + throw new IllegalStateException(stat + + " is not expected to have existing samples"); + } + containerSample.put(stat, sample); + } + + public List getSample(HddsProtos.LifeCycleState stat) { + return getSample(stat.toString()); + } + + public List getSample(HealthState stat) { + return getSample(stat.toString()); + } + + private List getSample(String stat) { + List list = containerSample.get(stat); + if (list == null) { + return Collections.emptyList(); + } + synchronized (list) { + return new ArrayList<>(list); + } + } + + private void increment(String stat) { + getStatAndEnsurePresent(stat).increment(); + } + + private LongAdder getStatAndEnsurePresent(String stat) { + LongAdder adder = stats.get(stat); + if (adder == null) { + throw new IllegalArgumentException("Unexpected stat " + stat); + } + return adder; + } + + private void incrementAndSample(String stat, ContainerID container) { + increment(stat); + List list = containerSample + .computeIfAbsent(stat, k -> new ArrayList<>()); + synchronized (list) { + if (list.size() < SAMPLE_LIMIT) { + list.add(container); + } + } + } + + private Map createStatsMap() { + Map map = new HashMap<>(); + for (HddsProtos.LifeCycleState s : HddsProtos.LifeCycleState.values()) { + map.put(s.toString(), new LongAdder()); + } + for (HealthState s : HealthState.values()) { + map.put(s.toString(), new LongAdder()); + } + return map; + } + + public HddsProtos.ReplicationManagerReportProto toProtobuf() { + HddsProtos.ReplicationManagerReportProto.Builder proto = + HddsProtos.ReplicationManagerReportProto.newBuilder(); + proto.setTimestamp(getReportTimeStamp()); + + for (Map.Entry e : stats.entrySet()) { + proto.addStat(HddsProtos.KeyIntValue.newBuilder() + .setKey(e.getKey()) + .setValue(e.getValue().longValue()) + .build()); + } + + for (Map.Entry> e : containerSample.entrySet()) { + HddsProtos.KeyContainerIDList.Builder sample + = HddsProtos.KeyContainerIDList.newBuilder(); + sample.setKey(e.getKey()); + for (ContainerID container : e.getValue()) { + sample.addContainer(container.getProtobuf()); + } + proto.addStatSample(sample.build()); + } + return proto.build(); + } + +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java index 644659557af..b2b566a7a40 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/InnerNodeImpl.java @@ -38,7 +38,7 @@ */ public class InnerNodeImpl extends NodeImpl implements InnerNode { protected static class Factory implements InnerNode.Factory { - protected Factory() {} + protected Factory() { } @Override 
public InnerNodeImpl newInnerNode(String name, String location, @@ -93,7 +93,7 @@ public int getNumOfNodes(int level) { } else { for (Node node: childrenMap.values()) { if (node instanceof InnerNode) { - count += ((InnerNode)node).getNumOfNodes(level -1); + count += ((InnerNode)node).getNumOfNodes(level - 1); } else { throw new RuntimeException("Cannot support Level:" + level + " on this node " + this.toString()); @@ -119,7 +119,7 @@ public List getNodes(int level) { } else { for (Node node: childrenMap.values()) { if (node instanceof InnerNode) { - result.addAll(((InnerNode)node).getNodes(level -1)); + result.addAll(((InnerNode)node).getNodes(level - 1)); } else { throw new RuntimeException("Cannot support Level:" + level + " on this node " + this.toString()); @@ -265,7 +265,7 @@ public Node getNode(String loc) { if (child == null) { return null; } - if (path.length == 1){ + if (path.length == 1) { return child; } if (child instanceof InnerNode) { @@ -292,7 +292,7 @@ public Node getLeaf(int leafIndex) { } return getChildNode(leafIndex); } else { - for(Node node : childrenMap.values()) { + for (Node node : childrenMap.values()) { InnerNodeImpl child = (InnerNodeImpl)node; int leafCount = child.getNumOfLeaves(); if (leafIndex < leafCount) { @@ -468,7 +468,7 @@ private Node getLeafOnLeafParent(int leafIndex, List excludedScopes, if (leafIndex >= getNumOfChildren()) { return null; } - for(Node node : childrenMap.values()) { + for (Node node : childrenMap.values()) { if (excludedNodes != null && excludedNodes.contains(node)) { continue; } @@ -519,7 +519,7 @@ private InnerNodeImpl createChildNode(String name) { private Node getChildNode(int index) { Iterator iterator = childrenMap.values().iterator(); Node node = null; - while(index >= 0 && iterator.hasNext()) { + while (index >= 0 && iterator.hasNext()) { node = (Node)iterator.next(); index--; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 43765a6e5a8..206a0fd73b2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -45,7 +45,7 @@ * (computers) and inner nodes represent datacenter/core-switches/routers that * manages traffic in/out of data centers or racks. 
*/ -public class NetworkTopologyImpl implements NetworkTopology{ +public class NetworkTopologyImpl implements NetworkTopology { public static final Logger LOG = LoggerFactory.getLogger(NetworkTopologyImpl.class); @@ -91,7 +91,7 @@ public void add(Node node) { Preconditions.checkArgument(node != null, "node cannot be null"); if (node instanceof InnerNode) { throw new IllegalArgumentException( - "Not allowed to add an inner node: "+ node.getNetworkFullPath()); + "Not allowed to add an inner node: " + node.getNetworkFullPath()); } int newDepth = NetUtils.locationToDepth(node.getNetworkLocation()) + 1; @@ -104,7 +104,7 @@ public void add(Node node) { boolean add; try { add = clusterTree.add(node); - }finally { + } finally { netlock.writeLock().unlock(); } @@ -126,12 +126,12 @@ public void remove(Node node) { Preconditions.checkArgument(node != null, "node cannot be null"); if (node instanceof InnerNode) { throw new IllegalArgumentException( - "Not allowed to remove an inner node: "+ node.getNetworkFullPath()); + "Not allowed to remove an inner node: " + node.getNetworkFullPath()); } netlock.writeLock().lock(); try { clusterTree.remove(node); - }finally { + } finally { netlock.writeLock().unlock(); } LOG.info("Removed a node: {}", node.getNetworkFullPath()); @@ -534,7 +534,7 @@ private Node chooseNodeInternal(String scope, int leafIndex, " generation " + ancestorGen); } // affinity ancestor should has overlap with scope - if (affinityAncestor.getNetworkFullPath().startsWith(scope)){ + if (affinityAncestor.getNetworkFullPath().startsWith(scope)) { finalScope = affinityAncestor.getNetworkFullPath(); } else if (!scope.startsWith(affinityAncestor.getNetworkFullPath())) { return null; @@ -655,21 +655,21 @@ public int getDistanceCost(Node node1, Node node2) { if (level1 > maxLevel || level2 > maxLevel) { return Integer.MAX_VALUE; } - while(level1 > level2 && node1 != null) { + while (level1 > level2 && node1 != null) { node1 = node1.getParent(); level1--; - cost += node1 == null? 0 : node1.getCost(); + cost += node1 == null ? 0 : node1.getCost(); } - while(level2 > level1 && node2 != null) { + while (level2 > level1 && node2 != null) { node2 = node2.getParent(); level2--; - cost += node2 == null? 0 : node2.getCost(); + cost += node2 == null ? 0 : node2.getCost(); } - while(node1 != null && node2 != null && node1 != node2) { + while (node1 != null && node2 != null && node1 != node2) { node1 = node1.getParent(); node2 = node2.getParent(); - cost += node1 == null? 0 : node1.getCost(); - cost += node2 == null? 0 : node2.getCost(); + cost += node1 == null ? 0 : node1.getCost(); + cost += node2 == null ? 0 : node2.getCost(); } return cost; } finally { @@ -752,7 +752,7 @@ private int getAvailableNodesCount(String scope, List excludedScopes, List excludedAncestorList = NetUtils.getAncestorList(this, mutableExcludedNodes, ancestorGen); for (Node ancestor : excludedAncestorList) { - if (scope.startsWith(ancestor.getNetworkFullPath())){ + if (scope.startsWith(ancestor.getNetworkFullPath())) { return 0; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java index 47e5de880d6..fc8e23ba132 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchema.java @@ -28,7 +28,7 @@ public final class NodeSchema { /** * Network topology layer type enum definition. 
*/ - public enum LayerType{ + public enum LayerType { ROOT("Root", NetConstants.INNER_NODE_COST_DEFAULT), INNER_NODE("InnerNode", NetConstants.INNER_NODE_COST_DEFAULT), LEAF_NODE("Leaf", NetConstants.NODE_COST_DEFAULT); @@ -47,7 +47,7 @@ public String toString() { return description; } - public int getCost(){ + public int getCost() { return cost; } public static LayerType getType(String typeStr) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java index cb9690fe37d..289f7e6b75f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaLoader.java @@ -68,7 +68,7 @@ public final class NodeSchemaLoader { private static final int LAYOUT_VERSION = 1; private static volatile NodeSchemaLoader instance = null; - private NodeSchemaLoader() {} + private NodeSchemaLoader() { } public static NodeSchemaLoader getInstance() { if (instance == null) { @@ -324,7 +324,7 @@ private Map loadLayersSection(Element root) { // Integrity check, only one ROOT and one LEAF is allowed boolean foundRoot = false; boolean foundLeaf = false; - for(NodeSchema schema: schemas.values()) { + for (NodeSchema schema: schemas.values()) { if (schema.getType() == LayerType.ROOT) { if (foundRoot) { throw new IllegalArgumentException("Multiple ROOT layers are found" + @@ -385,7 +385,7 @@ private NodeSchemaLoadResult loadTopologySection(Element root, + "> is null"); } if (TOPOLOGY_PATH.equals(tagName)) { - if(value.startsWith(NetConstants.PATH_SEPARATOR_STR)) { + if (value.startsWith(NetConstants.PATH_SEPARATOR_STR)) { value = value.substring(1); } String[] layerIDs = value.split(NetConstants.PATH_SEPARATOR_STR); @@ -403,7 +403,7 @@ private NodeSchemaLoadResult loadTopologySection(Element root, throw new IllegalArgumentException("Topology path doesn't start " + "with ROOT layer"); } - if (schemas.get(layerIDs[layerIDs.length -1]).getType() != + if (schemas.get(layerIDs[layerIDs.length - 1]).getType() != LayerType.LEAF_NODE) { throw new IllegalArgumentException("Topology path doesn't end " + "with LEAF layer"); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java index 044f151868a..f5c0b62100c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java @@ -415,7 +415,7 @@ public static class Builder { private Instant creationTimestamp = null; private UUID suggestedLeaderId = null; - public Builder() {} + public Builder() { } public Builder(Pipeline pipeline) { this.id = pipeline.id; @@ -486,10 +486,10 @@ public Pipeline build() { if (nodeOrder != null && !nodeOrder.isEmpty()) { // This branch is for build from ProtoBuf List nodesWithOrder = new ArrayList<>(); - for(int i = 0; i < nodeOrder.size(); i++) { + for (int i = 0; i < nodeOrder.size(); i++) { int nodeIndex = nodeOrder.get(i); Iterator it = nodeStatus.keySet().iterator(); - while(it.hasNext() && nodeIndex >= 0) { + while (it.hasNext() && nodeIndex >= 0) { DatanodeDetails node = it.next(); if (nodeIndex == 0) { nodesWithOrder.add(node); @@ -503,7 +503,7 @@ public Pipeline build() { nodesWithOrder, id); } pipeline.setNodesInOrder(nodesWithOrder); - } else if (nodesInOrder != 
null){ + } else if (nodesInOrder != null) { // This branch is for pipeline clone pipeline.setNodesInOrder(nodesInOrder); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java index 7f8663ef05b..9f78b3166ff 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages; @@ -94,6 +95,15 @@ ContainerWithPipeline allocateContainer( ContainerWithPipeline getContainerWithPipeline(long containerID) throws IOException; + /** + * Gets the list of ReplicaInfo known by SCM for a given container. + * @param containerId ID of the container + * @return List of ReplicaInfo for the container or an empty list if none. + * @throws IOException + */ + List + getContainerReplicas(long containerId) throws IOException; + /** * Ask SCM the location of a batch of containers. SCM responds with a group of * nodes where these containers and their replicas are located. @@ -308,12 +318,20 @@ Map> getSafeModeRuleStatuses() */ boolean getReplicationManagerStatus() throws IOException; + /** + * Returns the latest container summary report generated by Replication + * Manager. + * @return The latest ReplicationManagerReport. + * @throws IOException + */ + ReplicationManagerReport getReplicationManagerReport() throws IOException; + /** * Start ContainerBalancer. */ boolean startContainerBalancer(Optional threshold, - Optional idleiterations, - Optional maxDatanodesRatioToInvolvePerIteration, + Optional iterations, + Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, Optional maxSizeLeavingSourceInGB) throws IOException; @@ -365,4 +383,6 @@ StatusAndMessages queryUpgradeFinalizationProgress( * commands operating on {@code containerID}. 
*/ Token getContainerToken(ContainerID containerID) throws IOException; + + long getContainerCount() throws IOException; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java index fcf3f130f8e..7f2d2a8bec9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java @@ -434,7 +434,7 @@ public static void closeContainer(XceiverClientSpi client, request.setContainerID(containerID); request.setCloseContainer(CloseContainerRequestProto.getDefaultInstance()); request.setDatanodeUuid(id); - if(encodedToken != null) { + if (encodedToken != null) { request.setEncodedToken(encodedToken); } client.sendCommand(request.build(), getValidatorList()); @@ -458,7 +458,7 @@ public static ReadContainerResponseProto readContainer( request.setContainerID(containerID); request.setReadContainer(ReadContainerRequestProto.getDefaultInstance()); request.setDatanodeUuid(id); - if(encodedToken != null) { + if (encodedToken != null) { request.setEncodedToken(encodedToken); } ContainerCommandResponseProto response = @@ -560,8 +560,8 @@ public static List getValidatorList() { ContainerCommandRequestProto request = builder.build(); Map responses = xceiverClient.sendCommandOnAllNodes(request); - for(Map.Entry entry: - responses.entrySet()){ + for (Map.Entry entry: + responses.entrySet()) { datanodeToResponseMap.put(entry.getKey(), entry.getValue().getGetBlock()); } return datanodeToResponseMap; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java index 3195e008cc5..8cd68a01251 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/HddsVersionInfo.java @@ -37,7 +37,7 @@ public final class HddsVersionInfo { public static final VersionInfo HDDS_VERSION_INFO = new VersionInfo("hdds"); - private HddsVersionInfo() {} + private HddsVersionInfo() { } public static void main(String[] args) { System.out.println("Using HDDS " + HDDS_VERSION_INFO.getVersion()); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java index 96d59963efc..e1e959823e3 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/ResourceSemaphore.java @@ -90,7 +90,7 @@ public boolean isClosed() { @Override public String toString() { - return (isClosed()? "closed/": availablePermits() + "/") + limit; + return (isClosed() ? "closed/" : availablePermits() + "/") + limit; } /** @@ -101,7 +101,7 @@ public static class Group { public Group(int... limits) { final List list = new ArrayList<>(limits.length); - for(int limit : limits) { + for (int limit : limits) { list.add(new ResourceSemaphore(limit)); } this.resources = Collections.unmodifiableList(list); @@ -131,7 +131,7 @@ boolean tryAcquire(int... 
permits) { } // failed at i, releasing all previous resources - for(i--; i >= 0; i--) { + for (i--; i >= 0; i--) { resources.get(i).release(permits[i]); } return false; @@ -147,13 +147,13 @@ public void acquire(int... permits) throws InterruptedException { } protected void release(int... permits) { - for(int i = resources.size() - 1; i >= 0; i--) { + for (int i = resources.size() - 1; i >= 0; i--) { resources.get(i).release(permits[i]); } } public void close() { - for(int i = resources.size() - 1; i >= 0; i--) { + for (int i = resources.size() - 1; i >= 0; i--) { resources.get(i).close(); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java index ba062bcae14..6fff80f6756 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/UniqueId.java @@ -51,7 +51,7 @@ public final class UniqueId { /** * Private constructor so that no one can instantiate this class. */ - private UniqueId() {} + private UniqueId() { } /** * Calculate and returns next unique id based on System#currentTimeMillis. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index a373fd19e61..bdc87899a45 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -89,7 +89,7 @@ public final class OzoneConsts { // OM Http server endpoints public static final String OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT = "/serviceList"; - public static final String OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT = + public static final String OZONE_DB_CHECKPOINT_HTTP_ENDPOINT = "/dbCheckpoint"; // Ozone File System scheme @@ -128,8 +128,8 @@ public final class OzoneConsts { public static final String CONTAINER_DB_SUFFIX = "container.db"; public static final String PIPELINE_DB_SUFFIX = "pipeline.db"; public static final String CRL_DB_SUFFIX = "crl.db"; - public static final String DN_CONTAINER_DB = "-dn-"+ CONTAINER_DB_SUFFIX; - public static final String DN_CRL_DB = "dn-"+ CRL_DB_SUFFIX; + public static final String DN_CONTAINER_DB = "-dn-" + CONTAINER_DB_SUFFIX; + public static final String DN_CRL_DB = "dn-" + CRL_DB_SUFFIX; public static final String CRL_DB_DIRECTORY_NAME = "crl"; public static final String OM_DB_NAME = "om.db"; public static final String SCM_DB_NAME = "scm.db"; @@ -187,7 +187,7 @@ public static Versioning getVersioning(boolean versioning) { public static final String OM_KEY_PREFIX = "/"; public static final String OM_USER_PREFIX = "$"; - public static final String OM_S3_PREFIX ="S3:"; + public static final String OM_S3_PREFIX = "S3:"; public static final String OM_S3_VOLUME_PREFIX = "s3"; public static final String OM_S3_SECRET = "S3Secret:"; public static final String OM_PREFIX = "Prefix:"; @@ -212,7 +212,7 @@ public static Versioning getVersioning(boolean versioning) { /** * Quota Units. */ - public enum Units {TB, GB, MB, KB, B} + public enum Units { TB, GB, MB, KB, B } /** * Max number of keys returned per list buckets operation. 
@@ -333,6 +333,7 @@ private OzoneConsts() { public static final String UNDELETED_KEYS_LIST = "unDeletedKeysList"; public static final String SOURCE_VOLUME = "sourceVolume"; public static final String SOURCE_BUCKET = "sourceBucket"; + public static final String BUCKET_LAYOUT = "bucketLayout"; @@ -389,7 +390,7 @@ private OzoneConsts() { public static final Pattern KEYNAME_ILLEGAL_CHARACTER_CHECK_REGEX = Pattern.compile("^[^^{}<>^?%~#`\\[\\]\\|\\\\(\\x80-\\xff)]+$"); - public static final String FS_FILE_COPYING_TEMP_SUFFIX= "._COPYING_"; + public static final String FS_FILE_COPYING_TEMP_SUFFIX = "._COPYING_"; // Transaction Info public static final String TRANSACTION_INFO_KEY = "#TRANSACTIONINFO"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java index 098ab6b2f7f..6c20968c8d5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditEventStatus.java @@ -26,7 +26,7 @@ public enum AuditEventStatus { private String status; - AuditEventStatus(String status){ + AuditEventStatus(String status) { this.status = status; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java index ee6f45dadb4..9f1f5f0e223 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLogger.java @@ -38,7 +38,7 @@ public class AuditLogger { * Parametrized Constructor to initialize logger. * @param type Audit Logger Type */ - public AuditLogger(AuditLoggerType type){ + public AuditLogger(AuditLoggerType type) { initializeLogger(type); } @@ -46,7 +46,7 @@ public AuditLogger(AuditLoggerType type){ * Initializes the logger with specific type. * @param loggerType specified one of the values from enum AuditLoggerType. 
*/ - private void initializeLogger(AuditLoggerType loggerType){ + private void initializeLogger(AuditLoggerType loggerType) { this.logger = LogManager.getContext(false).getLogger(loggerType.getType()); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java index 18241c7712a..dbfde9f555b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditLoggerType.java @@ -31,7 +31,7 @@ public String getType() { return type; } - AuditLoggerType(String type){ + AuditLoggerType(String type) { this.type = type; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java index 505b9580715..3414aa403bc 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMarker.java @@ -28,11 +28,11 @@ public enum AuditMarker { private Marker marker; - AuditMarker(Marker marker){ + AuditMarker(Marker marker) { this.marker = marker; } - public Marker getMarker(){ + public Marker getMarker() { return marker; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java index 6f3bbadaecb..9d28c9f43ed 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/AuditMessage.java @@ -64,12 +64,12 @@ public static class Builder { private Map params; private String ret; - public Builder setUser(String usr){ + public Builder setUser(String usr) { this.user = usr; return this; } - public Builder atIp(String ipAddr){ + public Builder atIp(String ipAddr) { this.ip = ipAddr; return this; } @@ -79,7 +79,7 @@ public Builder forOperation(AuditAction action) { return this; } - public Builder withParams(Map args){ + public Builder withParams(Map args) { this.params = args; return this; } @@ -89,12 +89,12 @@ public Builder withResult(AuditEventStatus result) { return this; } - public Builder withException(Throwable ex){ + public Builder withException(Throwable ex) { this.throwable = ex; return this; } - public AuditMessage build(){ + public AuditMessage build() { String message = "user=" + this.user + " | ip=" + this.ip + " | " + "op=" + this.op + " " + this.params + " | " + "ret=" + this.ret; return new AuditMessage(message, throwable); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java index 9b88c6a1d12..3c1c2098679 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/SCMAction.java @@ -48,7 +48,8 @@ public enum SCMAction implements AuditAction { STOP_CONTAINER_BALANCER, GET_CONTAINER_BALANCER_STATUS, GET_CONTAINER_WITH_PIPELINE_BATCH, - ADD_SCM; + ADD_SCM, + GET_REPLICATION_MANAGER_REPORT; @Override public String getAction() { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java index 
7ce643db471..6187d6bd9cd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBuffer.java @@ -80,7 +80,7 @@ public final void update(ByteBuffer b) { } private static int update(int crc, ByteBuffer b, int[] table) { - for(; b.remaining() > 7;) { + for (; b.remaining() > 7;) { final int c0 = (b.get() ^ crc) & 0xff; final int c1 = (b.get() ^ (crc >>>= 8)) & 0xff; final int c2 = (b.get() ^ (crc >>>= 8)) & 0xff; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java index 7622ffc001c..5a63c09f123 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java @@ -57,8 +57,9 @@ final class IncrementalChunkBuffer implements ChunkBuffer { Preconditions.checkArgument(increment > 0); this.limit = limit; this.increment = increment; - this.limitIndex = limit/increment; - this.buffers = new ArrayList<>(limitIndex + (limit%increment == 0? 0: 1)); + this.limitIndex = limit / increment; + this.buffers = new ArrayList<>( + limitIndex + (limit % increment == 0 ? 0 : 1)); this.isDuplicated = isDuplicated; } @@ -66,7 +67,7 @@ final class IncrementalChunkBuffer implements ChunkBuffer { private int getBufferCapacityAtIndex(int i) { Preconditions.checkArgument(i >= 0); Preconditions.checkArgument(i <= limitIndex); - return i < limitIndex? increment: limit%increment; + return i < limitIndex ? increment : limit % increment; } private void assertInt(int expected, int computed, String name, int i) { @@ -126,7 +127,7 @@ private ByteBuffer getAndAllocateAtPosition(int position) { Preconditions.checkArgument(position < limit); final int i = position / increment; final ByteBuffer ith = getAndAllocateAtIndex(i); - assertInt(position%increment, ith.position(), "position", i); + assertInt(position % increment, ith.position(), "position", i); return ith; } @@ -207,7 +208,7 @@ public ChunkBuffer put(ByteBuffer that) { } final int thatLimit = that.limit(); - for(int p = position(); that.position() < thatLimit;) { + for (int p = position(); that.position() < thatLimit;) { final ByteBuffer b = getAndAllocateAtPosition(p); final int min = Math.min(b.remaining(), thatLimit - that.position()); that.limit(that.position() + min); @@ -229,7 +230,7 @@ public ChunkBuffer duplicate(int newPosition, int newLimit) { final int pr = newPosition % increment; final int li = newLimit / increment; final int lr = newLimit % increment; - final int newSize = lr == 0? li: li + 1; + final int newSize = lr == 0 ? li : li + 1; for (int i = 0; i < newSize; i++) { final int pos = i < pi ? increment : i == pi ? 
pr : 0; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java index e6e1df5135e..6ba438456e1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java @@ -109,7 +109,7 @@ public String getClusterID() { public Long getCreationTime() { String creationTime = properties.getProperty(CREATION_TIME); - if(creationTime != null) { + if (creationTime != null) { return Long.parseLong(creationTime); } return null; @@ -117,7 +117,7 @@ public Long getCreationTime() { public int getLayoutVersion() { String layout = properties.getProperty(LAYOUT_VERSION); - if(layout != null) { + if (layout != null) { return Integer.parseInt(layout); } return 0; @@ -166,7 +166,7 @@ private void verifyNodeType(NodeType type) throws InconsistentStorageStateException { NodeType nodeType = getNodeType(); Preconditions.checkNotNull(nodeType); - if(type != nodeType) { + if (type != nodeType) { throw new InconsistentStorageStateException("Expected NodeType: " + type + ", but found: " + nodeType); } @@ -176,7 +176,7 @@ private void verifyClusterId() throws InconsistentStorageStateException { String clusterId = getClusterID(); Preconditions.checkNotNull(clusterId); - if(clusterId.isEmpty()) { + if (clusterId.isEmpty()) { throw new InconsistentStorageStateException("Cluster ID not found"); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java index a9de8922b5b..ebc4bba209d 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ha/ratis/RatisSnapshotInfo.java @@ -50,7 +50,7 @@ public void updateTermIndex(long newTerm, long newIndex) { this.snapshotIndex = newIndex; } - public RatisSnapshotInfo() {} + public RatisSnapshotInfo() { } public RatisSnapshotInfo(long term, long index) { this.term = term; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java index 8ea16897e11..434e497e23c 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/BlockData.java @@ -161,8 +161,8 @@ private List castChunkList() { * @return list of chunkinfo. */ public List getChunks() { - return chunkList == null? Collections.emptyList() - : chunkList instanceof ContainerProtos.ChunkInfo? + return chunkList == null ? Collections.emptyList() + : chunkList instanceof ContainerProtos.ChunkInfo ? 
Collections.singletonList((ContainerProtos.ChunkInfo)chunkList) : Collections.unmodifiableList(castChunkList()); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java index 7773828e2db..a13f164eec6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java @@ -45,7 +45,7 @@ public static Map getAuditParams( Map auditParams = new TreeMap<>(); Type cmdType = msg.getCmdType(); String containerID = String.valueOf(msg.getContainerID()); - switch(cmdType) { + switch (cmdType) { case CreateContainer: auditParams.put("containerID", containerID); auditParams.put("containerType", @@ -75,11 +75,11 @@ public static Map getAuditParams( return auditParams; case PutBlock: - try{ + try { auditParams.put("blockData", BlockData.getFromProtoBuf(msg.getPutBlock().getBlockData()) .toString()); - } catch (IOException ex){ + } catch (IOException ex) { if (LOG.isTraceEnabled()) { LOG.trace("Encountered error parsing BlockData from protobuf: " + ex.getMessage()); @@ -132,11 +132,11 @@ public static Map getAuditParams( case CompactChunk: return null; //CompactChunk operation case PutSmallFile: - try{ + try { auditParams.put("blockData", BlockData.getFromProtoBuf(msg.getPutSmallFile() .getBlock().getBlockData()).toString()); - } catch (IOException ex){ + } catch (IOException ex) { if (LOG.isTraceEnabled()) { LOG.trace("Encountered error parsing BlockData from protobuf: " + ex.getMessage()); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java index e95105b0742..2fd7a9d4940 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/Lease.java @@ -90,7 +90,7 @@ public boolean hasExpired() { */ public void registerCallBack(Callable callback) throws LeaseExpiredException { - if(hasExpired()) { + if (hasExpired()) { throw new LeaseExpiredException(messageForResource(resource)); } callbacks.add(callback); @@ -104,7 +104,7 @@ public void registerCallBack(Callable callback) * If the lease has already timed out */ public long getElapsedTime() throws LeaseExpiredException { - if(hasExpired()) { + if (hasExpired()) { throw new LeaseExpiredException(messageForResource(resource)); } return Time.monotonicNow() - creationTime; @@ -129,7 +129,7 @@ public long getRemainingTime() throws LeaseExpiredException { * If the lease has already timed out */ public long getLeaseLifeTime() throws LeaseExpiredException { - if(hasExpired()) { + if (hasExpired()) { throw new LeaseExpiredException(messageForResource(resource)); } return leaseTimeout.get(); @@ -144,7 +144,7 @@ public long getLeaseLifeTime() throws LeaseExpiredException { * If the lease has already timed out */ public void renew(long timeout) throws LeaseExpiredException { - if(hasExpired()) { + if (hasExpired()) { throw new LeaseExpiredException(messageForResource(resource)); } leaseTimeout.addAndGet(timeout); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java index a79d5178e7a..3f2d5fbe974 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lease/LeaseCallbackExecutor.java @@ -53,7 +53,7 @@ public void run() { if (LOG.isDebugEnabled()) { LOG.debug("Executing callbacks for lease on {}", resource); } - for(Callable callback : callbacks) { + for (Callable callback : callbacks) { try { callback.call(); } catch (Exception e) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java index 45f0638b992..b3ffe59f1d7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/util/ShutdownHookManager.java @@ -104,7 +104,7 @@ public void run() { long ended = System.currentTimeMillis(); LOG.debug(String.format( "Completed shutdown in %.3f seconds; Timeouts: %d", - (ended-started)/1000.0, timeoutCount)); + (ended - started) / 1000.0, timeoutCount)); // each of the hooks have executed; now shut down the // executor itself. shutdownExecutor(new OzoneConfiguration()); diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index e74621506c8..b4517c687ed 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -722,11 +722,11 @@ - ozone.scm.chunk.layout + ozone.scm.container.layout FILE_PER_BLOCK OZONE, SCM, CONTAINER, PERFORMANCE - Chunk layout defines how chunks, blocks and containers are stored on disk. + Container layout defines how chunks, blocks and containers are stored on disk. Each chunk is stored separately with FILE_PER_CHUNK. All chunks of a block are stored in the same file with FILE_PER_BLOCK. The default is FILE_PER_BLOCK. @@ -2014,7 +2014,7 @@ ozone.scm.ratis.enable - false + true OZONE, SCM, HA, RATIS Property to enable or disable Ratis server on SCM. Please note - this is a temporary property to disable SCM Ratis server. @@ -2518,6 +2518,24 @@ OM snapshot. + + ozone.recon.scm.connection.request.timeout + 5s + OZONE, RECON, SCM + + Connection request timeout in milliseconds for HTTP call made by Recon to + request SCM DB snapshot. + + + + ozone.recon.scm.connection.timeout + 5s + OZONE, RECON, SCM + + Connection timeout for HTTP call in milliseconds made by Recon to request + SCM snapshot. + + ozone.recon.om.socket.timeout 5s @@ -2551,6 +2569,41 @@ Request to flush the OM DB before taking checkpoint snapshot. + + recon.om.delta.update.limit + 2000 + OZONE, RECON + + Recon each time get a limited delta updates from OM. + The actual fetched data might be larger than this limit. + + + + recon.om.delta.update.loop.limit + 10 + OZONE, RECON + + The sync between Recon and OM consists of several small + fetch loops. + + + + ozone.recon.scm.container.threshold + 100 + OZONE, RECON, SCM + + Threshold value for the difference in number of containers + in SCM and RECON. + + + + ozone.recon.scm.snapshot.enabled + false + OZONE, RECON, SCM + + If enabled, SCM DB Snapshot is taken by Recon. 
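The ozone-default.xml additions above are plain configuration keys. A minimal sketch of setting the renamed layout property and the new Recon snapshot settings programmatically; the key strings are taken from the hunks above, while the values and the class name are illustrative only:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class ReconSnapshotConfigExample {
  private ReconSnapshotConfigExample() { }

  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // FILE_PER_BLOCK keeps all chunks of a block in one file (the default above).
    conf.set("ozone.scm.container.layout", "FILE_PER_BLOCK");
    // Let Recon take SCM DB snapshots and bound the allowed container-count drift.
    conf.setBoolean("ozone.recon.scm.snapshot.enabled", true);
    conf.setInt("ozone.recon.scm.container.threshold", 100);
    System.out.println(conf.get("ozone.scm.container.layout"));
  }
}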
+ + hdds.tracing.enabled false diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java index ef93927ee4c..fd8aa28e63d 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java @@ -122,7 +122,7 @@ public void testGetSCMAddresses() { assertThat(addresses.size(), is(3)); it = addresses.iterator(); HashMap expected1 = new HashMap<>(hostsAndPorts); - while(it.hasNext()) { + while (it.hasNext()) { InetSocketAddress current = it.next(); assertTrue(expected1.remove(current.getHostName(), current.getPort())); @@ -136,7 +136,7 @@ public void testGetSCMAddresses() { assertThat(addresses.size(), is(3)); it = addresses.iterator(); HashMap expected2 = new HashMap<>(hostsAndPorts); - while(it.hasNext()) { + while (it.hasNext()) { InetSocketAddress current = it.next(); assertTrue(expected2.remove(current.getHostName(), current.getPort())); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java index 9adf8f7fbf5..1315ad5ec87 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/client/TestReplicationConfig.java @@ -167,7 +167,7 @@ public void testAdjustReplication() { @Test public void testValidationBasedOnConfig() { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OZONE_REPLICATION+".allowed-configs", + conf.set(OZONE_REPLICATION + ".allowed-configs", "^STANDALONE/ONE|RATIS/THREE$"); conf.set(OZONE_REPLICATION, factor); conf.set(OZONE_REPLICATION_TYPE, type); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java index 8a177042a64..0e817116da1 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java @@ -22,9 +22,11 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; +import java.net.URL; import java.nio.charset.StandardCharsets; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.junit.Assert; @@ -190,6 +192,32 @@ public void setConfigFromObjectWithConfigDefaults() { subject.getTimeDuration("test.scm.client.wait", 555, TimeUnit.SECONDS)); } + @Test + public void testInstantiationWithInputConfiguration() throws IOException { + String key = "hdds.scm.init.default.layout.version"; + String val = "Test1"; + Configuration configuration = new Configuration(true); + + File ozoneSite = tempConfigs.newFile("ozone-site.xml"); + FileOutputStream ozoneSiteStream = new FileOutputStream(ozoneSite); + try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter( + ozoneSiteStream, StandardCharsets.UTF_8))) { + startConfig(out); + appendProperty(out, key, val); + endConfig(out); + } + configuration + .addResource(new URL("file:///" + ozoneSite.getAbsolutePath())); + + OzoneConfiguration ozoneConfiguration = + new OzoneConfiguration(configuration); + // ozoneConfig value matches input config value for the corresponding key + 
Assert.assertEquals(val, ozoneConfiguration.get(key)); + Assert.assertEquals(val, configuration.get(key)); + + Assert.assertNotEquals(val, new OzoneConfiguration().get(key)); + } + @Test public void setConfigFromObjectWithObjectDefaults() { // GIVEN diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java index b057349ed82..c9ed258f24f 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java @@ -96,7 +96,7 @@ public void testExcludePattern() throws IOException { long usedSpace = du.getUsedSpace(); - assertFileSize(4*KB, usedSpace); + assertFileSize(4 * KB, usedSpace); } private static void assertFileSize(long expected, long actual) { diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java index 469faac7444..d3ddbe0ef32 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestContainerCommandRequestMessage.java @@ -45,7 +45,7 @@ public class TestContainerCommandRequestMessage { static ByteString newData(int length) { final ByteString.Output out = ByteString.newOutput(); - for(int i = 0; i < length; i++) { + for (int i = 0; i < length; i++) { out.write(RANDOM.nextInt()); } return out.toByteString(); @@ -128,10 +128,10 @@ public void testWriteChunk() throws Exception { static void runTest( BiFunction method) throws Exception { - for(int i = 0; i < 2; i++) { + for (int i = 0; i < 2; i++) { runTest(i, method); } - for(int i = 2; i < 1 << 10;) { + for (int i = 2; i < 1 << 10;) { runTest(i + 1 + RANDOM.nextInt(i - 1), method); i <<= 1; runTest(i, method); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplicaInfo.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplicaInfo.java new file mode 100644 index 00000000000..195baca2db0 --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReplicaInfo.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.container; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.junit.Assert; +import org.junit.Test; + +import java.util.UUID; + +/** + * Test for the ContainerReplicaInfo class. 
+ */ +public class TestContainerReplicaInfo { + + @Test + public void testObjectCreatedFromProto() { + HddsProtos.SCMContainerReplicaProto proto = + HddsProtos.SCMContainerReplicaProto.newBuilder() + .setKeyCount(10) + .setBytesUsed(12345) + .setContainerID(567) + .setPlaceOfBirth(UUID.randomUUID().toString()) + .setSequenceID(5) + .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails() + .getProtoBufMessage()) + .setState("OPEN") + .build(); + + ContainerReplicaInfo info = ContainerReplicaInfo.fromProto(proto); + + Assert.assertEquals(proto.getContainerID(), info.getContainerID()); + Assert.assertEquals(proto.getBytesUsed(), info.getBytesUsed()); + Assert.assertEquals(proto.getKeyCount(), info.getKeyCount()); + Assert.assertEquals(proto.getPlaceOfBirth(), + info.getPlaceOfBirth().toString()); + Assert.assertEquals(DatanodeDetails.getFromProtoBuf( + proto.getDatanodeDetails()), info.getDatanodeDetails()); + Assert.assertEquals(proto.getSequenceID(), info.getSequenceId()); + Assert.assertEquals(proto.getState(), info.getState()); + } +} \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java new file mode 100644 index 00000000000..a05f9abb2b5 --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManagerReport.java @@ -0,0 +1,162 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.container; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.concurrent.ThreadLocalRandom; + +/** + * Tests for the ReplicationManagerReport class. + */ +public class TestReplicationManagerReport { + + private ReplicationManagerReport report; + + @Before + public void setup() { + report = new ReplicationManagerReport(); + } + + @Test + public void testMetricCanBeIncremented() { + report.increment(ReplicationManagerReport.HealthState.UNDER_REPLICATED); + report.increment(ReplicationManagerReport.HealthState.UNDER_REPLICATED); + report.increment(ReplicationManagerReport.HealthState.OVER_REPLICATED); + + report.increment(HddsProtos.LifeCycleState.OPEN); + report.increment(HddsProtos.LifeCycleState.CLOSED); + report.increment(HddsProtos.LifeCycleState.CLOSED); + + Assert.assertEquals(2, + report.getStat(ReplicationManagerReport.HealthState.UNDER_REPLICATED)); + Assert.assertEquals(1, + report.getStat(ReplicationManagerReport.HealthState.OVER_REPLICATED)); + Assert.assertEquals(0, + report.getStat(ReplicationManagerReport.HealthState.MIS_REPLICATED)); + + Assert.assertEquals(1, + report.getStat(HddsProtos.LifeCycleState.OPEN)); + Assert.assertEquals(2, + report.getStat(HddsProtos.LifeCycleState.CLOSED)); + Assert.assertEquals(0, + report.getStat(HddsProtos.LifeCycleState.QUASI_CLOSED)); + } + + @Test + public void testContainerIDsCanBeSampled() { + report.incrementAndSample( + ReplicationManagerReport.HealthState.UNDER_REPLICATED, + new ContainerID(1)); + report.incrementAndSample( + ReplicationManagerReport.HealthState.UNDER_REPLICATED, + new ContainerID(2)); + report.incrementAndSample( + ReplicationManagerReport.HealthState.OVER_REPLICATED, + new ContainerID(3)); + + Assert.assertEquals(2, + report.getStat(ReplicationManagerReport.HealthState.UNDER_REPLICATED)); + Assert.assertEquals(1, + report.getStat(ReplicationManagerReport.HealthState.OVER_REPLICATED)); + Assert.assertEquals(0, + report.getStat(ReplicationManagerReport.HealthState.MIS_REPLICATED)); + + List sample = + report.getSample(ReplicationManagerReport.HealthState.UNDER_REPLICATED); + Assert.assertEquals(new ContainerID(1), sample.get(0)); + Assert.assertEquals(new ContainerID(2), sample.get(1)); + Assert.assertEquals(2, sample.size()); + + sample = + report.getSample(ReplicationManagerReport.HealthState.OVER_REPLICATED); + Assert.assertEquals(new ContainerID(3), sample.get(0)); + Assert.assertEquals(1, sample.size()); + + sample = + report.getSample(ReplicationManagerReport.HealthState.MIS_REPLICATED); + Assert.assertEquals(0, sample.size()); + } + + @Test + public void testSamplesAreLimited() { + for (int i = 0; i < ReplicationManagerReport.SAMPLE_LIMIT * 2; i++) { + report.incrementAndSample( + ReplicationManagerReport.HealthState.UNDER_REPLICATED, + new ContainerID(i)); + } + List sample = + report.getSample(ReplicationManagerReport.HealthState.UNDER_REPLICATED); + Assert.assertEquals(ReplicationManagerReport.SAMPLE_LIMIT, sample.size()); + for (int i = 0; i < 
ReplicationManagerReport.SAMPLE_LIMIT; i++) { + Assert.assertEquals(new ContainerID(i), sample.get(i)); + } + } + + @Test + public void testSerializeToProtoAndBack() { + report.setTimestamp(12345); + Random rand = ThreadLocalRandom.current(); + for (HddsProtos.LifeCycleState s : HddsProtos.LifeCycleState.values()) { + report.setStat(s.toString(), rand.nextInt(Integer.MAX_VALUE)); + } + for (ReplicationManagerReport.HealthState s : + ReplicationManagerReport.HealthState.values()) { + report.setStat(s.toString(), rand.nextInt(Integer.MAX_VALUE)); + List containers = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + containers.add(ContainerID.valueOf(rand.nextInt(Integer.MAX_VALUE))); + } + report.setSample(s.toString(), containers); + } + HddsProtos.ReplicationManagerReportProto proto = report.toProtobuf(); + ReplicationManagerReport newReport + = ReplicationManagerReport.fromProtobuf(proto); + Assert.assertEquals(report.getReportTimeStamp(), + newReport.getReportTimeStamp()); + + for (HddsProtos.LifeCycleState s : HddsProtos.LifeCycleState.values()) { + Assert.assertEquals(report.getStat(s), newReport.getStat(s)); + } + + for (ReplicationManagerReport.HealthState s : + ReplicationManagerReport.HealthState.values()) { + Assert.assertTrue(report.getSample(s).equals(newReport.getSample(s))); + } + } + + @Test(expected = IllegalStateException.class) + public void testStatCannotBeSetTwice() { + report.setStat(HddsProtos.LifeCycleState.CLOSED.toString(), 10); + report.setStat(HddsProtos.LifeCycleState.CLOSED.toString(), 10); + } + + @Test(expected = IllegalStateException.class) + public void testSampleCannotBeSetTwice() { + List containers = new ArrayList<>(); + containers.add(ContainerID.valueOf(1)); + report.setSample(HddsProtos.LifeCycleState.CLOSED.toString(), containers); + report.setSample(HddsProtos.LifeCycleState.CLOSED.toString(), containers); + } +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java new file mode 100644 index 00000000000..2f459fbcba6 --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/container/package-info.java @@ -0,0 +1,21 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + Test cases for SCM container client classes. 
+ */ +package org.apache.hadoop.hdds.scm.container; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java index 12a024005ac..e561bb7ccc1 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMNodeInfo.java @@ -93,7 +93,7 @@ public void testScmHANodeInfo() { int count = 1; for (SCMNodeInfo scmNodeInfo : scmNodeInfos) { Assert.assertEquals(scmServiceId, scmNodeInfo.getServiceId()); - Assert.assertEquals("scm"+count++, scmNodeInfo.getNodeId()); + Assert.assertEquals("scm" + count++, scmNodeInfo.getNodeId()); Assert.assertEquals("localhost:" + ++port, scmNodeInfo.getBlockClientAddress()); Assert.assertEquals("localhost:" + ++port, @@ -117,7 +117,7 @@ public void testSCMHANodeInfoWithDefaultPorts() { int count = 1; for (SCMNodeInfo scmNodeInfo : scmNodeInfos) { Assert.assertEquals(scmServiceId, scmNodeInfo.getServiceId()); - Assert.assertEquals("scm"+count++, scmNodeInfo.getNodeId()); + Assert.assertEquals("scm" + count++, scmNodeInfo.getNodeId()); Assert.assertEquals("localhost:" + OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT, scmNodeInfo.getBlockClientAddress()); Assert.assertEquals("localhost:" + diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java index 0008e6670a5..e50eca2e690 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java @@ -171,7 +171,7 @@ public static Collection setupDatanodes() { @Test public void testContains() { Node nodeNotInMap = createDatanode("8.8.8.8", "/d2/r4"); - for (int i=0; i < dataNodes.length; i++) { + for (int i = 0; i < dataNodes.length; i++) { assertTrue(cluster.contains(dataNodes[i])); } assertFalse(cluster.contains(nodeNotInMap)); @@ -238,7 +238,7 @@ public void testAncestor() { assumeTrue(cluster.getMaxLevel() > 2); int maxLevel = cluster.getMaxLevel(); assertTrue(cluster.isSameParent(dataNodes[0], dataNodes[1])); - while(maxLevel > 1) { + while (maxLevel > 1) { assertTrue(cluster.isSameAncestor(dataNodes[0], dataNodes[1], maxLevel - 1)); maxLevel--; @@ -262,17 +262,17 @@ public void testAncestor() { @Test public void testAddRemove() { - for(int i = 0; i < dataNodes.length; i++) { + for (int i = 0; i < dataNodes.length; i++) { cluster.remove(dataNodes[i]); } - for(int i = 0; i < dataNodes.length; i++) { + for (int i = 0; i < dataNodes.length; i++) { assertFalse(cluster.contains(dataNodes[i])); } // no leaf nodes assertEquals(0, cluster.getNumOfLeafNode(null)); // no inner nodes assertEquals(0, cluster.getNumOfNodes(2)); - for(int i = 0; i < dataNodes.length; i++) { + for (int i = 0; i < dataNodes.length; i++) { cluster.add(dataNodes[i]); } // Inner nodes are created automatically @@ -467,10 +467,10 @@ public void testChooseRandomExcludedNode() { }}; int leafNum = cluster.getNumOfLeafNode(null); Map frequency; - for(Node[] list : excludedNodeLists) { + for (Node[] list : excludedNodeLists) { List excludedList = Arrays.asList(list); int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { + while (ancestorGen < cluster.getMaxLevel()) { frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen); List 
ancestorList = NetUtils.getAncestorList(cluster, excludedList, ancestorGen); @@ -490,7 +490,7 @@ public void testChooseRandomExcludedNode() { // all nodes excluded, no node will be picked List excludedList = Arrays.asList(dataNodes); int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { + while (ancestorGen < cluster.getMaxLevel()) { frequency = pickNodesAtRandom(leafNum, null, excludedList, ancestorGen); for (Node key : dataNodes) { assertTrue(frequency.get(key) == 0); @@ -500,7 +500,7 @@ public void testChooseRandomExcludedNode() { // out scope excluded nodes, each node will be picked excludedList = Arrays.asList(createDatanode("1.1.1.1.", "/city1/rack1")); ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { + while (ancestorGen < cluster.getMaxLevel()) { frequency = pickNodes(leafNum, null, excludedList, null, ancestorGen); for (Node key : dataNodes) { assertTrue(frequency.get(key) != 0); @@ -536,7 +536,7 @@ public void testChooseRandomExcludedNodeAndScope() { while (!path.equals(ROOT)) { scope = "~" + path; int ancestorGen = 0; - while(ancestorGen < cluster.getMaxLevel()) { + while (ancestorGen < cluster.getMaxLevel()) { for (Node[] list : excludedNodeLists) { List excludedList = Arrays.asList(list); frequency = diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java index b7b3dc6340d..00124d9cdd5 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipeline.java @@ -51,7 +51,7 @@ public static Pipeline createSingleNodePipeline() throws IOException { public static Pipeline createPipeline(int numNodes) throws IOException { Preconditions.checkArgument(numNodes >= 1); final List ids = new ArrayList<>(numNodes); - for(int i = 0; i < numNodes; i++) { + for (int i = 0; i < numNodes; i++) { ids.add(MockDatanodeDetails.randomLocalDatanodeDetails()); } return createPipeline(ids); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java index f756008a4cc..f77e84a1dbe 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -23,7 +23,7 @@ import org.apache.ozone.test.LambdaTestUtils; import org.junit.jupiter.api.Test; -import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; class TestStringCodec { @@ -44,9 +44,17 @@ void testExtract() throws Exception { "String does not match tracer state format", () -> codec.extract(sb)); sb.append(":66"); + JaegerSpanContext context = codec.extract(sb); - String expectedContextString = "123:456:789:66"; - assertTrue(context.getTraceId().equals("123")); - assertTrue(context.toString().equals(expectedContextString)); + StringBuilder injected = new StringBuilder(); + codec.inject(context, injected); + + String expectedTraceId = pad("123"); + assertEquals(expectedTraceId, context.getTraceId()); + assertEquals(expectedTraceId + ":456:789:66", injected.toString()); + } + + private static String pad(String s) { + return "0000000000000000".substring(s.length()) + s; } } \ No newline at end of file diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java index fe4ccc0cb50..ce6f58dadcb 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/MockGatheringChannel.java @@ -43,7 +43,7 @@ public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { checkElementIndex(offset, srcs.length, "offset"); - checkElementIndex(offset+length-1, srcs.length, "offset+length"); + checkElementIndex(offset + length - 1, srcs.length, "offset+length"); long bytes = 0; for (ByteBuffer b : srcs) { diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java index cbdd558cbe8..f9c194d45cf 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestResourceSemaphore.java @@ -52,18 +52,18 @@ public void testGroup() { try { g.release(1, 0); Assert.fail("Should have failed."); - } catch (IllegalStateException e){ + } catch (IllegalStateException e) { } try { g.release(0, 1); Assert.fail("Should have failed."); - } catch (IllegalStateException e){ + } catch (IllegalStateException e) { } } static void assertUsed(ResourceSemaphore.Group g, int... 
expected) { Assert.assertEquals(expected.length, g.resourceSize()); - for(int i = 0; i < expected.length; i++) { + for (int i = 0; i < expected.length; i++) { Assert.assertEquals(expected[i], g.get(i).used()); } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java index 0c2d98fab29..9555225b22b 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/DummyEntity.java @@ -27,7 +27,7 @@ public class DummyEntity implements Auditable { private String key1; private String key2; - public DummyEntity(){ + public DummyEntity() { this.key1 = "value1"; this.key2 = "value2"; } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java index 41dc4f5b7e0..01fceaea88b 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java @@ -189,7 +189,7 @@ private void verifyLog(String... expectedStrings) throws IOException { lines = FileUtils.readLines(file, (String)null); try { Thread.sleep(500 * (i + 1)); - } catch(InterruptedException ie) { + } catch (InterruptedException ie) { Thread.currentThread().interrupt(); break; } @@ -212,7 +212,7 @@ private void verifyNoLog() throws IOException { assertEquals(0, lines.size()); } - private static class TestException extends Exception{ + private static class TestException extends Exception { TestException(String message) { super(message); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java index 2e144e65699..a61ff9054bc 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java @@ -73,7 +73,7 @@ public void testIncorrectChecksum() throws Exception { // Change the data and check if new checksum matches the original checksum. 
// Modifying one byte of data should be enough for the checksum data to // mismatch - data[50] = (byte) (data[50]+1); + data[50] = (byte) (data[50] + 1); ChecksumData newChecksumData = checksum.computeChecksum(data); Assert.assertNotEquals("Checksums should not match for different data", originalChecksumData, newChecksumData); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java index 9b69fad7915..1e850991790 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChunkBuffer.java @@ -46,7 +46,7 @@ private static int nextInt(int n) { public void testImplWithByteBuffer() { runTestImplWithByteBuffer(1); runTestImplWithByteBuffer(1 << 10); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { runTestImplWithByteBuffer(nextInt(100) + 1); } } @@ -62,7 +62,7 @@ public void testIncrementalChunkBuffer() { runTestIncrementalChunkBuffer(1, 1); runTestIncrementalChunkBuffer(4, 8); runTestIncrementalChunkBuffer(16, 1 << 10); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { final int a = ThreadLocalRandom.current().nextInt(100) + 1; final int b = ThreadLocalRandom.current().nextInt(100) + 1; runTestIncrementalChunkBuffer(Math.min(a, b), Math.max(a, b)); @@ -80,7 +80,7 @@ private static void runTestIncrementalChunkBuffer(int increment, int n) { public void testImplWithList() { runTestImplWithList(4, 8); runTestImplWithList(16, 1 << 10); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { final int a = ThreadLocalRandom.current().nextInt(10) + 1; final int b = ThreadLocalRandom.current().nextInt(100) + 1; runTestImplWithList(Math.min(a, b), Math.max(a, b)); @@ -131,7 +131,7 @@ private static void runTestImpl(byte[] expected, int bpc, ChunkBuffer impl) { assertIterate(expected, impl, bpc); } else if (bpc == 0) { for (int d = 1; d < 5; d++) { - final int bytesPerChecksum = n/d; + final int bytesPerChecksum = n / d; if (bytesPerChecksum > 0) { assertIterate(expected, impl, bytesPerChecksum); } @@ -148,7 +148,7 @@ private static void runTestImpl(byte[] expected, int bpc, ChunkBuffer impl) { private static void assertDuplicate(byte[] expected, ChunkBuffer impl) { final int n = expected.length; assertToByteString(expected, 0, n, impl); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { final int offset = nextInt(n); final int length = nextInt(n - offset + 1); assertToByteString(expected, offset, length, impl); @@ -165,14 +165,14 @@ private static void assertIterate( final int numChecksums = (n + bpc - 1) / bpc; final Iterator i = duplicated.iterate(bpc).iterator(); int count = 0; - for(int j = 0; j < numChecksums; j++) { + for (int j = 0; j < numChecksums; j++) { final ByteBuffer b = i.next(); - final int expectedRemaining = j < numChecksums - 1? - bpc : n - bpc *(numChecksums - 1); + final int expectedRemaining = j < numChecksums - 1 ? 
+ bpc : n - bpc * (numChecksums - 1); Assert.assertEquals(expectedRemaining, b.remaining()); - final int offset = j* bpc; - for(int k = 0; k < expectedRemaining; k++) { + final int offset = j * bpc; + for (int k = 0; k < expectedRemaining; k++) { Assert.assertEquals(expected[offset + k], b.get()); count++; } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java index c1470bb2efc..be0575d9d0f 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestStateMachine.java @@ -45,12 +45,12 @@ public class TestStateMachine { /** * STATES used by the test state machine. */ - public enum STATES {INIT, CREATING, OPERATIONAL, CLOSED, CLEANUP, FINAL}; + public enum STATES { INIT, CREATING, OPERATIONAL, CLOSED, CLEANUP, FINAL }; /** * EVENTS used by the test state machine. */ - public enum EVENTS {ALLOCATE, CREATE, UPDATE, CLOSE, DELETE, TIMEOUT}; + public enum EVENTS { ALLOCATE, CREATE, UPDATE, CLOSE, DELETE, TIMEOUT }; @Rule public ExpectedException exception = ExpectedException.none(); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 95282d5f7be..c2e4c5542a8 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -352,7 +352,7 @@ public static ContainerCommandRequestProto getCreateContainerSecureRequest( LOG.trace("addContainer: {}", containerID); Builder request = getContainerCommandRequestBuilder(containerID, pipeline); - if(token != null){ + if (token != null) { request.setEncodedToken(token.encodeToUrlString()); } return request.build(); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java index a51be5ff3aa..c9b9bf1de5f 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/lease/TestLeaseManager.java @@ -51,7 +51,7 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if(obj instanceof DummyResource) { + if (obj instanceof DummyResource) { return name.equals(((DummyResource) obj).name); } return false; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/UpgradeTestUtils.java similarity index 95% rename from hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeUtils.java rename to hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/UpgradeTestUtils.java index 6daec6c80e4..d3990e60982 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/TestUpgradeUtils.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/upgrade/UpgradeTestUtils.java @@ -28,8 +28,8 @@ /** * Upgrade related test utility methods. 
*/ -public final class TestUpgradeUtils { - private TestUpgradeUtils() { } +public final class UpgradeTestUtils { + private UpgradeTestUtils() { } /** * Creates a VERSION file for the specified node type under the directory diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 58c6e728460..3b9afa9312f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -111,12 +111,12 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin { private final Map ratisMetricsMap = new ConcurrentHashMap<>(); private DNMXBeanImpl serviceRuntimeInfo = - new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) {}; + new DNMXBeanImpl(HddsVersionInfo.HDDS_VERSION_INFO) { }; private ObjectName dnInfoBeanName; private DatanodeCRLStore dnCRLStore; //Constructor for DataNode PluginService - public HddsDatanodeService(){} + public HddsDatanodeService() { } public HddsDatanodeService(boolean printBanner, String[] args) { this.printBanner = printBanner; @@ -376,7 +376,7 @@ private void getSCMSignedCert(OzoneConfiguration config) { datanodeDetails.getProtoBufMessage(), getEncodedString(csr)); // Persist certificates. - if(response.hasX509CACertificate()) { + if (response.hasX509CACertificate()) { String pemEncodedCert = response.getX509Certificate(); dnCertClient.storeCertificate(pemEncodedCert, true); dnCertClient.storeCertificate(response.getX509CACertificate(), true, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java index 970251c6732..3d6cb3b1352 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java @@ -44,7 +44,7 @@ * */ @InterfaceAudience.Private -@Metrics(about="Storage Container DataNode Metrics", context="dfs") +@Metrics(about = "Storage Container DataNode Metrics", context = "dfs") public class ContainerMetrics { public static final String STORAGE_CONTAINER_METRICS = "StorageContainerMetrics"; @@ -106,7 +106,7 @@ public void incContainerOpsMetrics(ContainerProtos.Type type) { numOpsArray[type.ordinal()].incr(); } - public long getContainerOpsMetrics(ContainerProtos.Type type){ + public long getContainerOpsMetrics(ContainerProtos.Type type) { return numOpsArray[type.ordinal()].value(); } @@ -122,7 +122,7 @@ public void incContainerBytesStats(ContainerProtos.Type type, long bytes) { opsBytesArray[type.ordinal()].incr(bytes); } - public long getContainerBytesMetrics(ContainerProtos.Type type){ + public long getContainerBytesMetrics(ContainerProtos.Type type) { return opsBytesArray[type.ordinal()].value(); } } \ No newline at end of file diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java index 032705d4ee7..2b6318385dc 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java @@ -189,7 +189,7 @@ public static void verifyChecksum(ContainerData containerData, HddsConfigKeys.HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED, HddsConfigKeys. HDDS_CONTAINER_CHECKSUM_VERIFICATION_ENABLED_DEFAULT); - if(enabled) { + if (enabled) { String storedChecksum = containerData.getChecksum(); Yaml yaml = ContainerDataYaml.getYamlForContainerType( diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java index 4db6d3120fd..d2ceacd37ad 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DatanodeVersionFile.java @@ -36,15 +36,15 @@ public class DatanodeVersionFile { private final String clusterId; private final String datanodeUuid; private final long cTime; - private final int layOutVersion; + private final int layoutVersion; public DatanodeVersionFile(String storageId, String clusterId, - String datanodeUuid, long cTime, int layOutVersion) { + String datanodeUuid, long cTime, int layoutVersion) { this.storageId = storageId; this.clusterId = clusterId; this.datanodeUuid = datanodeUuid; this.cTime = cTime; - this.layOutVersion = layOutVersion; + this.layoutVersion = layoutVersion; } private Properties createProperties() { @@ -54,7 +54,7 @@ private Properties createProperties() { properties.setProperty(OzoneConsts.DATANODE_UUID, datanodeUuid); properties.setProperty(OzoneConsts.CTIME, String.valueOf(cTime)); properties.setProperty(OzoneConsts.LAYOUTVERSION, String.valueOf( - layOutVersion)); + layoutVersion)); return properties; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java index f64774e9520..fa70819df0f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java @@ -129,19 +129,20 @@ public abstract class ContainerData { * Creates a ContainerData Object, which holds metadata of the container. 
* @param type - ContainerType * @param containerId - ContainerId - * @param layOutVersion - Container layOutVersion + * @param layoutVersion - Container layoutVersion * @param size - Container maximum size in bytes * @param originPipelineId - Pipeline Id where this container is/was created * @param originNodeId - Node Id where this container is/was created */ protected ContainerData(ContainerType type, long containerId, - ChunkLayOutVersion layOutVersion, long size, String originPipelineId, - String originNodeId) { + ContainerLayoutVersion layoutVersion, long size, + String originPipelineId, + String originNodeId) { Preconditions.checkNotNull(type); this.containerType = type; this.containerID = containerId; - this.layOutVersion = layOutVersion.getVersion(); + this.layOutVersion = layoutVersion.getVersion(); this.metadata = new TreeMap<>(); this.state = ContainerDataProto.State.OPEN; this.readCount = new AtomicLong(0L); @@ -158,7 +159,7 @@ protected ContainerData(ContainerType type, long containerId, protected ContainerData(ContainerData source) { this(source.getContainerType(), source.getContainerID(), - source.getLayOutVersion(), source.getMaxSize(), + source.getLayoutVersion(), source.getMaxSize(), source.getOriginPipelineId(), source.getOriginNodeId()); } @@ -225,11 +226,11 @@ public long getMaxSize() { } /** - * Returns the layOutVersion of the actual container data format. - * @return layOutVersion + * Returns the layoutVersion of the actual container data format. + * @return layoutVersion */ - public ChunkLayOutVersion getLayOutVersion() { - return ChunkLayOutVersion.getChunkLayOutVersion(layOutVersion); + public ContainerLayoutVersion getLayoutVersion() { + return ContainerLayoutVersion.getContainerLayoutVersion(layOutVersion); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java index 244750aab4e..b4e15dbf6d3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java @@ -251,9 +251,10 @@ public Object construct(Node node) { Map nodes = constructMapping(mnode); //Needed this, as TAG.INT type is by default converted to Long. - long layOutVersion = (long) nodes.get(OzoneConsts.LAYOUTVERSION); - ChunkLayOutVersion layoutVersion = - ChunkLayOutVersion.getChunkLayOutVersion((int) layOutVersion); + long layoutVersion = (long) nodes.get(OzoneConsts.LAYOUTVERSION); + ContainerLayoutVersion containerLayoutVersion = + ContainerLayoutVersion.getContainerLayoutVersion( + (int) layoutVersion); long size = (long) nodes.get(OzoneConsts.MAX_SIZE); @@ -263,8 +264,8 @@ public Object construct(Node node) { //When a new field is added, it needs to be added here. 
KeyValueContainerData kvData = new KeyValueContainerData( - (long) nodes.get(OzoneConsts.CONTAINER_ID), layoutVersion, size, - originPipelineId, originNodeId); + (long) nodes.get(OzoneConsts.CONTAINER_ID), containerLayoutVersion, + size, originPipelineId, originNodeId); kvData.setContainerDBType((String)nodes.get( OzoneConsts.CONTAINER_DB_TYPE)); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java similarity index 77% rename from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java rename to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java index e0341fa0d8b..a5901a20879 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java @@ -36,7 +36,7 @@ /** * Defines layout versions for the Chunks. */ -public enum ChunkLayOutVersion { +public enum ContainerLayoutVersion { FILE_PER_CHUNK(1, "One file per chunk") { @Override @@ -54,29 +54,31 @@ public File getChunkFile(File chunkDir, BlockID blockID, }; private static final Logger LOG = - LoggerFactory.getLogger(ChunkLayOutVersion.class); + LoggerFactory.getLogger(ContainerLayoutVersion.class); - private static final ChunkLayOutVersion - DEFAULT_LAYOUT = ChunkLayOutVersion.FILE_PER_BLOCK; + private static final ContainerLayoutVersion + DEFAULT_LAYOUT = ContainerLayoutVersion.FILE_PER_BLOCK; - private static final List<ChunkLayOutVersion> CHUNK_LAYOUT_VERSIONS = + private static final List<ContainerLayoutVersion> CONTAINER_LAYOUT_VERSIONS = ImmutableList.copyOf(values()); private final int version; private final String description; - ChunkLayOutVersion(int version, String description) { + ContainerLayoutVersion(int version, String description) { this.version = version; this.description = description; } /** - * Return ChunkLayOutVersion object for the numeric chunkVersion. + * Return ContainerLayoutVersion object for the numeric containerVersion. */ - public static ChunkLayOutVersion getChunkLayOutVersion(int chunkVersion) { - for (ChunkLayOutVersion chunkLayOutVersion : CHUNK_LAYOUT_VERSIONS) { - if (chunkLayOutVersion.getVersion() == chunkVersion) { - return chunkLayOutVersion; + public static ContainerLayoutVersion getContainerLayoutVersion( + int containerVersion) { + for (ContainerLayoutVersion containerLayoutVersion : + CONTAINER_LAYOUT_VERSIONS) { + if (containerLayoutVersion.getVersion() == containerVersion) { + return containerLayoutVersion; } } return null; @@ -85,17 +87,17 @@ public static ChunkLayOutVersion getChunkLayOutVersion(int chunkVersion) { /** * @return list of all versions. */ - public static List<ChunkLayOutVersion> getAllVersions() { - return CHUNK_LAYOUT_VERSIONS; + public static List<ContainerLayoutVersion> getAllVersions() { + return CONTAINER_LAYOUT_VERSIONS; } /** * @return the latest version.
*/ - public static ChunkLayOutVersion getConfiguredVersion( + public static ContainerLayoutVersion getConfiguredVersion( ConfigurationSource conf) { try { - return conf.getEnum(ScmConfigKeys.OZONE_SCM_CHUNK_LAYOUT_KEY, + return conf.getEnum(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY, DEFAULT_LAYOUT); } catch (IllegalArgumentException e) { return DEFAULT_LAYOUT; @@ -127,7 +129,7 @@ public File getChunkFile(ContainerData containerData, BlockID blockID, @Override public String toString() { - return "ChunkLayout:v" + version; + return "ContainerLayout:v" + version; } private static File getChunkDir(ContainerData containerData) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 5dbba2bc98e..1edd046f09a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -581,16 +581,17 @@ private EventType getEventType(ContainerCommandRequestProto msg) { } private void audit(AuditAction action, EventType eventType, - Map params, AuditEventStatus result, Throwable exception){ + Map params, AuditEventStatus result, + Throwable exception) { AuditMessage amsg; switch (result) { case SUCCESS: - if(isAllowed(action.getAction())) { - if(eventType == EventType.READ && + if (isAllowed(action.getAction())) { + if (eventType == EventType.READ && AUDIT.getLogger().isInfoEnabled(AuditMarker.READ.getMarker())) { amsg = buildAuditMessageForSuccess(action, params); AUDIT.logReadSuccess(amsg); - } else if(eventType == EventType.WRITE && + } else if (eventType == EventType.WRITE && AUDIT.getLogger().isInfoEnabled(AuditMarker.WRITE.getMarker())) { amsg = buildAuditMessageForSuccess(action, params); AUDIT.logWriteSuccess(amsg); @@ -599,11 +600,11 @@ private void audit(AuditAction action, EventType eventType, break; case FAILURE: - if(eventType == EventType.READ && + if (eventType == EventType.READ && AUDIT.getLogger().isErrorEnabled(AuditMarker.READ.getMarker())) { amsg = buildAuditMessageForFailure(action, params, exception); AUDIT.logReadFailure(amsg); - } else if(eventType == EventType.WRITE && + } else if (eventType == EventType.WRITE && AUDIT.getLogger().isErrorEnabled(AuditMarker.WRITE.getMarker())) { amsg = buildAuditMessageForFailure(action, params, exception); AUDIT.logWriteFailure(amsg); @@ -656,7 +657,7 @@ enum EventType { * @return true or false accordingly. 
*/ private boolean isAllowed(String action) { - switch(action) { + switch (action) { case "CLOSE_CONTAINER": case "CREATE_CONTAINER": case "LIST_CONTAINER": diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java index b736eb536ed..d6ca2d120e6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/OpenContainerBlockMap.java @@ -130,7 +130,7 @@ public List getOpenBlocks(long containerId) { public void removeFromBlockMap(BlockID blockID) { Preconditions.checkNotNull(blockID); containers.computeIfPresent(blockID.getContainerID(), (containerId, blocks) - -> blocks.removeAndGetSize(blockID.getLocalID()) == 0? null: blocks); + -> blocks.removeAndGetSize(blockID.getLocalID()) == 0 ? null : blocks); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java index 14ae4c943c0..24df9f5b1ee 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java @@ -37,8 +37,6 @@ public class DatanodeConfiguration { private static final Logger LOG = LoggerFactory.getLogger(DatanodeConfiguration.class); - static final String REPLICATION_STREAMS_LIMIT_KEY = - "hdds.datanode.replication.streams.limit"; static final String CONTAINER_DELETE_THREADS_MAX_KEY = "hdds.datanode.container.delete.threads.max"; static final String PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY = @@ -57,8 +55,6 @@ public class DatanodeConfiguration { static final boolean CHUNK_DATA_VALIDATION_CHECK_DEFAULT = false; - static final int REPLICATION_MAX_STREAMS_DEFAULT = 10; - static final long PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT = 60; static final int FAILED_VOLUMES_TOLERATED_DEFAULT = -1; @@ -71,19 +67,6 @@ public class DatanodeConfiguration { static final long DISK_CHECK_TIMEOUT_DEFAULT = Duration.ofMinutes(10).toMillis(); - /** - * The maximum number of replication commands a single datanode can execute - * simultaneously. - */ - @Config(key = "replication.streams.limit", - type = ConfigType.INT, - defaultValue = "10", - tags = {DATANODE}, - description = "The maximum number of replication commands a single " + - "datanode can execute simultaneously" - ) - private int replicationMaxStreams = REPLICATION_MAX_STREAMS_DEFAULT; - /** * Number of threads per volume that Datanode will use for chunk read. */ @@ -138,7 +121,7 @@ public class DatanodeConfiguration { type = ConfigType.INT, defaultValue = "1440", tags = {DATANODE}, - description = "The maximum number of block delete commands queued on "+ + description = "The maximum number of block delete commands queued on " + " a datanode" ) private int blockDeleteQueueLimit = 60 * 24; @@ -264,13 +247,6 @@ public void setWaitOnAllFollowers(boolean val) { @PostConstruct public void validate() { - if (replicationMaxStreams < 1) { - LOG.warn(REPLICATION_STREAMS_LIMIT_KEY + " must be greater than zero " + - "and was set to {}. 
Defaulting to {}", - replicationMaxStreams, REPLICATION_MAX_STREAMS_DEFAULT); - replicationMaxStreams = REPLICATION_MAX_STREAMS_DEFAULT; - } - if (containerDeleteThreads < 1) { LOG.warn(CONTAINER_DELETE_THREADS_MAX_KEY + " must be greater than zero" + " and was set to {}. Defaulting to {}", @@ -316,18 +292,10 @@ public void validate() { } } - public void setReplicationMaxStreams(int replicationMaxStreams) { - this.replicationMaxStreams = replicationMaxStreams; - } - public void setContainerDeleteThreads(int containerDeleteThreads) { this.containerDeleteThreads = containerDeleteThreads; } - public int getReplicationMaxStreams() { - return replicationMaxStreams; - } - public int getContainerDeleteThreads() { return containerDeleteThreads; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index ee5e87adcae..ce79049f4fd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -55,6 +55,7 @@ import org.apache.hadoop.ozone.container.replication.ContainerReplicator; import org.apache.hadoop.ozone.container.replication.DownloadAndImportReplicator; import org.apache.hadoop.ozone.container.replication.MeasuredReplicator; +import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; import org.apache.hadoop.ozone.container.replication.ReplicationSupervisorMetrics; import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader; @@ -166,9 +167,11 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, replicatorMetrics = new MeasuredReplicator(replicator); + ReplicationConfig replicationConfig = + conf.getObject(ReplicationConfig.class); supervisor = new ReplicationSupervisor(container.getContainerSet(), context, - replicatorMetrics, dnConf.getReplicationMaxStreams()); + replicatorMetrics, replicationConfig); replicationSupervisorMetrics = ReplicationSupervisorMetrics.create(supervisor); @@ -290,7 +293,7 @@ private void start() throws IOException { now = Time.monotonicNow(); if (now < nextHB.get()) { - if(!Thread.interrupted()) { + if (!Thread.interrupted()) { try { Thread.sleep(nextHB.get() - now); } catch (InterruptedException e) { @@ -376,7 +379,7 @@ public void close() throws IOException { connectionManager.close(); } - if(container != null) { + if (container != null) { container.stop(); } @@ -634,12 +637,12 @@ public DatanodeLayoutStorage getLayoutStorage() { } public StatusAndMessages finalizeUpgrade() - throws IOException{ + throws IOException { return upgradeFinalizer.finalize(datanodeDetails.getUuidString(), this); } public StatusAndMessages queryUpgradeStatus() - throws IOException{ + throws IOException { return upgradeFinalizer.reportStatus(datanodeDetails.getUuidString(), true); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index 9eea758b0d8..c75da0a74d6 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -184,7 +184,7 @@ public StateContext(ConfigurationSource conf, /** * init related ReportType Collections. */ - private void initReportTypeCollection(){ + private void initReportTypeCollection() { fullReportTypeList.add(CONTAINER_REPORTS_PROTO_NAME); type2Reports.put(CONTAINER_REPORTS_PROTO_NAME, containerReports); fullReportTypeList.add(NODE_REPORT_PROTO_NAME); @@ -221,7 +221,7 @@ boolean isEntering() { */ boolean isExiting(DatanodeStateMachine.DatanodeStates newState) { boolean isExiting = state != newState && stateExecutionCount.get() > 0; - if(isExiting) { + if (isExiting) { stateExecutionCount.set(0); } return isExiting; @@ -344,7 +344,7 @@ public void putBackReports(List reportsToPutBack, Preconditions.checkState(reportType != null); } synchronized (incrementalReportsQueue) { - if (incrementalReportsQueue.containsKey(endpoint)){ + if (incrementalReportsQueue.containsKey(endpoint)) { incrementalReportsQueue.get(endpoint).addAll(0, reportsToPutBack); } } @@ -381,7 +381,7 @@ List getFullReports( InetSocketAddress endpoint) { Map mp = fullReportSendIndicator.get(endpoint); List nonIncrementalReports = new LinkedList<>(); - if (null != mp){ + if (null != mp) { for (Map.Entry kv : mp.entrySet()) { if (kv.getValue().get()) { String reportType = kv.getKey(); @@ -817,14 +817,14 @@ public Map getCommandStatusMap() { */ public boolean updateCommandStatus(Long cmdId, Consumer cmdStatusUpdater) { - if(cmdStatusMap.containsKey(cmdId)) { + if (cmdStatusMap.containsKey(cmdId)) { cmdStatusUpdater.accept(cmdStatusMap.get(cmdId)); return true; } return false; } - public void configureHeartbeatFrequency(){ + public void configureHeartbeatFrequency() { heartbeatFrequency.set(getScmHeartbeatInterval(conf)); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java index a5044cb0685..7908e3d7d28 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CommandDispatcher.java @@ -68,7 +68,7 @@ private CommandDispatcher(OzoneContainer container, SCMConnectionManager this.connectionManager = connectionManager; handlerMap = new HashMap<>(); for (CommandHandler h : handlers) { - if(handlerMap.containsKey(h.getCommandType())){ + if (handlerMap.containsKey(h.getCommandType())) { LOG.error("Duplicate handler for the same command. Exiting. 
Handle " + "key : {}", h.getCommandType().getDescriptorForType().getName()); throw new IllegalArgumentException("Duplicate handler for the same " + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java index 217592ddccd..a766de025da 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java @@ -81,7 +81,7 @@ public DatanodeStateMachine.DatanodeStates call() throws Exception { try { addresses = getSCMAddressForDatanodes(conf); } catch (IllegalArgumentException e) { - if(!Strings.isNullOrEmpty(e.getMessage())) { + if (!Strings.isNullOrEmpty(e.getMessage())) { LOG.error("Failed to get SCM addresses: {}", e.getMessage()); } return DatanodeStateMachine.DatanodeStates.SHUTDOWN; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java index fa6c937f633..d80d1e5bca3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java @@ -122,7 +122,7 @@ public EndpointStateMachine.EndPointStates call() throws Exception { } } catch (DiskOutOfSpaceException ex) { rpcEndPoint.setState(EndpointStateMachine.EndPointStates.SHUTDOWN); - } catch(IOException ex) { + } catch (IOException ex) { rpcEndPoint.logIfNeeded(ex); } finally { rpcEndPoint.unlock(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java index 4ecf2789a42..557473bf9ce 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/CSMMetrics.java @@ -33,7 +33,7 @@ * This class is for maintaining Container State Machine statistics. 
*/ @InterfaceAudience.Private -@Metrics(about="Container State Machine Metrics", context="dfs") +@Metrics(about = "Container State Machine Metrics", context = "dfs") public class CSMMetrics { public static final String SOURCE_NAME = CSMMetrics.class.getSimpleName(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 301fc59237f..4ef532049fb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -689,7 +689,7 @@ public CompletableFuture read( private synchronized void updateLastApplied() { Long appliedTerm = null; long appliedIndex = -1; - for(long i = getLastAppliedTermIndex().getIndex() + 1;; i++) { + for (long i = getLastAppliedTermIndex().getIndex() + 1;; i++) { final Long removed = applyTransactionCompletionMap.remove(i); if (removed == null) { break; @@ -740,7 +740,7 @@ private CompletableFuture submitTask( = queue.submit(task, executor); // after the task is completed, remove the queue if the queue is empty. f.thenAccept(dummy -> containerTaskQueues.computeIfPresent(containerId, - (id, q) -> q.isEmpty()? null: q)); + (id, q) -> q.isEmpty() ? null : q)); return f; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index c04e5e967ba..237b4486826 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -273,7 +273,7 @@ private RaftProperties newRaftProperties() { // Set the ratis storage directory Collection storageDirPaths = HddsServerUtil.getOzoneDatanodeRatisDirectory(conf); - List storageDirs= new ArrayList<>(storageDirPaths.size()); + List storageDirs = new ArrayList<>(storageDirPaths.size()); storageDirPaths.stream().forEach(d -> storageDirs.add(new File(d))); RaftServerConfigKeys.setStorageDir(properties, storageDirs); @@ -693,7 +693,7 @@ private long calculatePipelineBytesWritten(HddsProtos.PipelineID pipelineID) { long bytesWritten = 0; Iterator> containerIt = containerController.getContainers(); - while(containerIt.hasNext()) { + while (containerIt.hasNext()) { ContainerData containerData = containerIt.next().getContainerData(); if (containerData.getOriginPipelineId() .compareTo(pipelineID.getId()) == 0) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java index 83b8615887c..6a38080214a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/HddsVolumeUtil.java @@ -145,7 +145,7 @@ public static int 
getLayOutVersion(Properties props, File versionFile) throws String lvStr = getProperty(props, OzoneConsts.LAYOUTVERSION, versionFile); int lv = Integer.parseInt(lvStr); - if(HDDSVolumeLayoutVersion.getLatestVersion().getVersion() != lv) { + if (HDDSVolumeLayoutVersion.getLatestVersion().getVersion() != lv) { throw new InconsistentStorageStateException("Invalid layOutVersion. " + "Version file has layOutVersion as " + lv + " and latest Datanode " + "layOutVersion is " + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 35ff05e7074..98e16294da1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -290,7 +290,7 @@ public void checkVolumeAsync(StorageVolume volume) { } public void refreshAllVolumeUsage() { - volumeMap.forEach((k, v)-> v.refreshVolumeInfo()); + volumeMap.forEach((k, v) -> v.refreshVolumeInfo()); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java index 5f629ad464f..715cb8400fe 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java @@ -161,7 +161,7 @@ public VolumeSet getVolumeSet() { } public StorageType getStorageType() { - if(this.volumeInfo != null) { + if (this.volumeInfo != null) { return this.volumeInfo.getStorageType(); } return StorageType.DEFAULT; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index 255e7ea82e8..1fcac8327fa 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -87,7 +87,7 @@ public synchronized void shutdown() { } } - public void refreshNow(){ + public void refreshNow() { source.refreshNow(); } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 1284f6a102e..e0ba37a99d6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -382,7 +382,7 @@ private void updateContainerData(Runnable update) private void compactDB() throws StorageContainerException { try { - try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { + try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { db.getStore().compactDB(); } } catch (StorageContainerException ex) { @@ -435,7 +435,7 @@ public void update( // holding lock and writing data to disk. 
We can have async implementation // to flush the update container data to disk. long containerId = containerData.getContainerID(); - if(!containerData.isValid()) { + if (!containerData.isValid()) { LOG.debug("Invalid container data. ContainerID: {}", containerId); throw new StorageContainerException("Invalid container data. " + "ContainerID: " + containerId, INVALID_CONTAINER_STATE); @@ -774,7 +774,7 @@ private enum ContainerCheckLevel { * @return * @throws IOException */ - private File createTempFile(File file) throws IOException{ + private File createTempFile(File file) throws IOException { return File.createTempFile("tmp_" + System.currentTimeMillis() + "_", file.getName(), file.getParentFile()); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index 11c245ab148..40d527d464e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; @@ -228,15 +228,15 @@ private void scanData(DataTransferThrottler throttler, Canceler canceler) onDiskContainerData.setDbFile(dbFile); - ChunkLayOutVersion layout = onDiskContainerData.getLayOutVersion(); + ContainerLayoutVersion layout = onDiskContainerData.getLayoutVersion(); - try(ReferenceCountedDB db = + try (ReferenceCountedDB db = BlockUtils.getDB(onDiskContainerData, checkConfig); BlockIterator kvIter = db.getStore().getBlockIterator()) { - while(kvIter.hasNext()) { + while (kvIter.hasNext()) { BlockData block = kvIter.nextBlock(); - for(ContainerProtos.ChunkInfo chunk : block.getChunks()) { + for (ContainerProtos.ChunkInfo chunk : block.getChunks()) { File chunkFile = layout.getChunkFile(onDiskContainerData, block.getBlockID(), ChunkInfo.getFromProtoBuf(chunk)); @@ -263,7 +263,7 @@ private void scanData(DataTransferThrottler throttler, Canceler canceler) private static void verifyChecksum(BlockData block, ContainerProtos.ChunkInfo chunk, File chunkFile, - ChunkLayOutVersion layout, + ContainerLayoutVersion layout, DataTransferThrottler throttler, Canceler canceler) throws IOException { ChecksumData checksumData = ChecksumData.getFromProtoBuf(chunk.getChecksumData()); @@ -275,12 +275,12 @@ private static void verifyChecksum(BlockData block, long bytesRead = 0; try (FileChannel channel = FileChannel.open(chunkFile.toPath(), ChunkUtils.READ_OPTIONS, ChunkUtils.NO_ATTRIBUTES)) { - if (layout == ChunkLayOutVersion.FILE_PER_BLOCK) { + if (layout == ContainerLayoutVersion.FILE_PER_BLOCK) { channel.position(chunk.getOffset()); } for (int i = 0; i < checksumCount; i++) { // limit last read for FILE_PER_BLOCK, to avoid reading next chunk - if (layout == ChunkLayOutVersion.FILE_PER_BLOCK && + if 
(layout == ContainerLayoutVersion.FILE_PER_BLOCK && i == checksumCount - 1 && chunk.getLen() % bytesPerChecksum != 0) { buffer.limit((int) (chunk.getLen() % bytesPerChecksum)); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java index e1a1f023158..81333073cc6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java @@ -30,7 +30,7 @@ .ContainerDataProto; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.yaml.snakeyaml.nodes.Tag; @@ -96,12 +96,12 @@ public class KeyValueContainerData extends ContainerData { /** * Constructs KeyValueContainerData object. * @param id - ContainerId - * @param layOutVersion chunk layout + * @param layoutVersion container layout * @param size - maximum size of the container in bytes */ - public KeyValueContainerData(long id, ChunkLayOutVersion layOutVersion, + public KeyValueContainerData(long id, ContainerLayoutVersion layoutVersion, long size, String originPipelineId, String originNodeId) { - super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion, + super(ContainerProtos.ContainerType.KeyValueContainer, id, layoutVersion, size, originPipelineId, originNodeId); this.numPendingDeletionBlocks = new AtomicLong(0); this.deleteTransactionId = 0; @@ -270,7 +270,7 @@ public ContainerDataProto getProtoBufMessage() { builder.setBytesUsed(this.getBytesUsed()); } - if(this.getContainerType() != null) { + if (this.getContainerType() != null) { builder.setContainerType(ContainerProtos.ContainerType.KeyValueContainer); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index b499755a017..ff2d061cb72 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -59,7 +59,7 @@ import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; @@ -194,7 +194,7 @@ static ContainerCommandResponseProto dispatchRequest(KeyValueHandler handler, DispatcherContext dispatcherContext) { Type cmdType = request.getCmdType(); - switch(cmdType) { + switch (cmdType) { case CreateContainer: return 
handler.handleCreateContainer(request, kvContainer); case ReadContainer: @@ -266,8 +266,8 @@ ContainerCommandResponseProto handleCreateContainer( long containerID = request.getContainerID(); - ChunkLayOutVersion layoutVersion = - ChunkLayOutVersion.getConfiguredVersion(conf); + ContainerLayoutVersion layoutVersion = + ContainerLayoutVersion.getConfiguredVersion(conf); KeyValueContainerData newContainerData = new KeyValueContainerData( containerID, layoutVersion, maxContainerSize, request.getPipelineID(), getDatanodeId()); @@ -744,7 +744,7 @@ ContainerCommandResponseProto handleWriteChunk( .writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext); // We should increment stats after writeChunk - if (stage == WriteChunkStage.WRITE_DATA|| + if (stage == WriteChunkStage.WRITE_DATA || stage == WriteChunkStage.COMBINED) { metrics.incContainerBytesStats(Type.WriteChunk, writeChunk .getChunkData().getLen()); @@ -959,7 +959,7 @@ public Container importContainer(ContainerData originalContainerData, public void exportContainer(final Container container, final OutputStream outputStream, final TarContainerPacker packer) - throws IOException{ + throws IOException { final KeyValueContainer kvc = (KeyValueContainer) container; kvc.exportContainerData(outputStream, packer); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java index ad1673a02ab..dde3e2e22dc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerLocationUtil.java @@ -97,7 +97,7 @@ private static String getBaseContainerLocation(String hddsVolumeDir, * @param containerId * @return container sub directory */ - private static String getContainerSubDirectory(long containerId){ + private static String getContainerSubDirectory(long containerId) { int directory = (int) ((containerId >> 9) & 0xFF); return Storage.CONTAINER_DIR + directory; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 58a0dcd9492..8256d0a5b4a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -119,7 +119,7 @@ public static long persistPutBlock(KeyValueContainer container, "cannot be negative"); // We are not locking the key manager since LevelDb serializes all actions // against a single DB. We rely on DB level locking to avoid conflicts. - try(ReferenceCountedDB db = BlockUtils. + try (ReferenceCountedDB db = BlockUtils. getDB(container.getContainerData(), config)) { // This is a post condition that acts as a hint to the user. // Should never fail. 
@@ -216,7 +216,7 @@ public BlockData getBlock(Container container, BlockID blockID) + containerBCSId + ".", UNKNOWN_BCSID); } - try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { + try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { // This is a post condition that acts as a hint to the user. // Should never fail. Preconditions.checkNotNull(db, DB_NULL_ERR_MSG); @@ -244,7 +244,7 @@ public long getCommittedBlockLength(Container container, BlockID blockID) throws IOException { KeyValueContainerData containerData = (KeyValueContainerData) container .getContainerData(); - try(ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { + try (ReferenceCountedDB db = BlockUtils.getDB(containerData, config)) { // This is a post condition that acts as a hint to the user. // Should never fail. Preconditions.checkNotNull(db, DB_NULL_ERR_MSG); @@ -276,7 +276,7 @@ public void deleteBlock(Container container, BlockID blockID) throws KeyValueContainerData cData = (KeyValueContainerData) container .getContainerData(); - try(ReferenceCountedDB db = BlockUtils.getDB(cData, config)) { + try (ReferenceCountedDB db = BlockUtils.getDB(cData, config)) { // This is a post condition that acts as a hint to the user. // Should never fail. Preconditions.checkNotNull(db, DB_NULL_ERR_MSG); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java index 27fe0d9cc0d..e998278c6f4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; @@ -43,8 +43,8 @@ import java.util.Map; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK; /** * Selects ChunkManager implementation to use for each chunk operation. 
@@ -54,8 +54,8 @@ public class ChunkManagerDispatcher implements ChunkManager { private static final Logger LOG = LoggerFactory.getLogger(ChunkManagerDispatcher.class); - private final Map handlers - = new EnumMap<>(ChunkLayOutVersion.class); + private final Map handlers + = new EnumMap<>(ContainerLayoutVersion.class); ChunkManagerDispatcher(boolean sync, BlockManager manager, VolumeSet volSet) { @@ -128,11 +128,13 @@ public void shutdown() { private @Nonnull ChunkManager selectHandler(Container container) throws StorageContainerException { - ChunkLayOutVersion layout = container.getContainerData().getLayOutVersion(); + ContainerLayoutVersion layout = + container.getContainerData().getLayoutVersion(); return selectVersionHandler(layout); } - private @Nonnull ChunkManager selectVersionHandler(ChunkLayOutVersion version) + private @Nonnull ChunkManager selectVersionHandler( + ContainerLayoutVersion version) throws StorageContainerException { ChunkManager versionHandler = handlers.get(version); if (versionHandler == null) { @@ -142,7 +144,7 @@ public void shutdown() { } private static ChunkManager throwUnknownLayoutVersion( - ChunkLayOutVersion version) throws StorageContainerException { + ContainerLayoutVersion version) throws StorageContainerException { String message = "Unsupported storage container layout: " + version; LOG.warn(message); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java index 5fd23b59a15..18c6b9d28d5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java @@ -56,7 +56,7 @@ import java.util.concurrent.ExecutionException; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; import static org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage.COMMIT_DATA; import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure; import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize; @@ -86,7 +86,7 @@ public FilePerBlockStrategy(boolean sync, BlockManager manager, private static void checkLayoutVersion(Container container) { Preconditions.checkArgument( - container.getContainerData().getLayOutVersion() == FILE_PER_BLOCK); + container.getContainerData().getLayoutVersion() == FILE_PER_BLOCK); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java index f2109cb745d..52a8b7c6c47 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java @@ -55,7 +55,7 @@ import java.util.List; import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK; import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize; /** @@ -82,7 +82,7 @@ public FilePerChunkStrategy(boolean sync, BlockManager manager, private static void checkLayoutVersion(Container container) { Preconditions.checkArgument( - container.getContainerData().getLayOutVersion() == FILE_PER_CHUNK); + container.getContainerData().getLayoutVersion() == FILE_PER_CHUNK); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java index 905918a2f92..d40afc5f674 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java @@ -351,7 +351,7 @@ public ContainerBackgroundTaskResult deleteViaSchema1( } // Once blocks are deleted... remove the blockID from blockDataTable. - try(BatchOperation batch = meta.getStore().getBatchHandler() + try (BatchOperation batch = meta.getStore().getBatchHandler() .initBatchOperation()) { for (String entry : succeedBlocks) { blockDataTable.deleteWithBatch(batch, entry); @@ -426,7 +426,7 @@ public ContainerBackgroundTaskResult deleteViaSchema2( // Once blocks are deleted... remove the blockID from blockDataTable // and also remove the transactions from txnTable. 
- try(BatchOperation batch = meta.getStore().getBatchHandler() + try (BatchOperation batch = meta.getStore().getBatchHandler() .initBatchOperation()) { for (DeletedBlocksTransaction delTx : delBlocks) { deleteTxns.deleteWithBatch(batch, delTx.getTxID()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index 15a8a9eb5b4..a3049be4674 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -276,7 +276,7 @@ public BlockData nextBlock() throws IOException, NoSuchElementException { nextBlock = null; return currentBlock; } - if(hasNext()) { + if (hasNext()) { return nextBlock(); } throw new NoSuchElementException("Block Iterator reached end for " + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java index 9beec5b16c8..171303dc0b4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerController.java @@ -112,7 +112,12 @@ public void markContainerForClose(final long containerId) public void markContainerUnhealthy(final long containerId) throws IOException { Container container = containerSet.getContainer(containerId); - getHandler(container).markContainerUnhealthy(container); + if (container != null) { + getHandler(container).markContainerUnhealthy(container); + } else { + LOG.warn("Container {} not found, may be deleted, skip mark UNHEALTHY", + containerId); + } } /** @@ -206,7 +211,12 @@ public Iterator> getContainers(HddsVolume volume) { void updateDataScanTimestamp(long containerId, Instant timestamp) throws IOException { Container container = containerSet.getContainer(containerId); - container.updateDataScanTimestamp(timestamp); + if (container != null) { + container.updateDataScanTimestamp(timestamp); + } else { + LOG.warn("Container {} not found, may be deleted, " + + "skip update DataScanTimestamp", containerId); + } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java index 4a20dc326a7..c9244855316 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerDataScrubberMetrics.java @@ -32,7 +32,7 @@ * This class captures the container data scrubber metrics on the data-node. 
**/ @InterfaceAudience.Private -@Metrics(about="DataNode container data scrubber metrics", context="dfs") +@Metrics(about = "DataNode container data scrubber metrics", context = "dfs") public final class ContainerDataScrubberMetrics { private final String name; @@ -110,8 +110,8 @@ private ContainerDataScrubberMetrics(String name, MetricsSystem ms) { public static ContainerDataScrubberMetrics create(final String volumeName) { MetricsSystem ms = DefaultMetricsSystem.instance(); - String name = "ContainerDataScrubberMetrics-"+ (volumeName.isEmpty() - ? "UndefinedDataNodeVolume"+ ThreadLocalRandom.current().nextInt() + String name = "ContainerDataScrubberMetrics-" + (volumeName.isEmpty() + ? "UndefinedDataNodeVolume" + ThreadLocalRandom.current().nextInt() : volumeName.replace(':', '-')); return ms.register(name, null, new ContainerDataScrubberMetrics(name, ms)); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java index 96efcf4a146..59657b064af 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScanner.java @@ -90,7 +90,7 @@ void runIteration() { metrics.incNumContainersScanned(); } } - long interval = System.nanoTime()-start; + long interval = System.nanoTime() - start; if (!stopping) { metrics.incNumScanIterations(); LOG.info("Completed an iteration of container metadata scrubber in" + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java index cf8e61725be..b70a3e5ed55 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerMetadataScrubberMetrics.java @@ -30,7 +30,7 @@ * data-node. **/ @InterfaceAudience.Private -@Metrics(about="DataNode container data scrubber metrics", context="dfs") +@Metrics(about = "DataNode container data scrubber metrics", context = "dfs") public final class ContainerMetadataScrubberMetrics { private final String name; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java index 548d1147a7f..023b251a524 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java @@ -49,7 +49,7 @@ /** * Client to read container data from gRPC. 
*/ -public class GrpcReplicationClient implements AutoCloseable{ +public class GrpcReplicationClient implements AutoCloseable { private static final Logger LOG = LoggerFactory.getLogger(GrpcReplicationClient.class); @@ -161,7 +161,16 @@ public void onNext(CopyContainerResponseProto chunk) { try { chunk.getData().writeTo(stream); } catch (IOException e) { - response.completeExceptionally(e); + LOG.error("Failed to write the stream buffer to {} for container {}", + outputPath, containerId, e); + try { + stream.close(); + } catch (IOException ex) { + LOG.error("Failed to close OutputStream {}", outputPath, e); + } finally { + deleteOutputOnFailure(); + response.completeExceptionally(e); + } } } @@ -176,6 +185,7 @@ public void onError(Throwable throwable) { } catch (IOException e) { LOG.error("Failed to close {} for container {}", outputPath, containerId, e); + deleteOutputOnFailure(); response.completeExceptionally(e); } } @@ -189,9 +199,9 @@ public void onCompleted() { } catch (IOException e) { LOG.error("Downloaded container {} OK, but failed to close {}", containerId, outputPath, e); + deleteOutputOnFailure(); response.completeExceptionally(e); } - } private void deleteOutputOnFailure() { @@ -204,5 +214,4 @@ private void deleteOutputOnFailure() { } } } - } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java index dd5f4c42869..bf8d6f10256 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationServer.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -22,7 +22,8 @@ import org.apache.hadoop.hdds.conf.Config; import org.apache.hadoop.hdds.conf.ConfigGroup; -import org.apache.hadoop.hdds.conf.ConfigTag; +import org.apache.hadoop.hdds.conf.ConfigType; +import org.apache.hadoop.hdds.conf.PostConstruct; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.tracing.GrpcServerInterceptor; @@ -39,6 +40,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hdds.conf.ConfigTag.DATANODE; +import static org.apache.hadoop.hdds.conf.ConfigTag.MANAGEMENT; + /** * Separated network server for server2server container replication. */ @@ -128,12 +132,33 @@ public int getPort() { /** * Replication-related configuration. */ - @ConfigGroup(prefix = "hdds.datanode.replication") + @ConfigGroup(prefix = ReplicationConfig.PREFIX) public static final class ReplicationConfig { - @Config(key = "port", defaultValue = "9886", description = "Port used for" - + " the server2server replication server", tags = { - ConfigTag.MANAGEMENT}) + public static final String PREFIX = "hdds.datanode.replication"; + public static final String STREAMS_LIMIT_KEY = "streams.limit"; + + public static final String REPLICATION_STREAMS_LIMIT_KEY = + PREFIX + "." + STREAMS_LIMIT_KEY; + + public static final int REPLICATION_MAX_STREAMS_DEFAULT = 10; + + /** + * The maximum number of replication commands a single datanode can execute + * simultaneously. 
+ */ + @Config(key = STREAMS_LIMIT_KEY, + type = ConfigType.INT, + defaultValue = "10", + tags = {DATANODE}, + description = "The maximum number of replication commands a single " + + "datanode can execute simultaneously" + ) + private int replicationMaxStreams = REPLICATION_MAX_STREAMS_DEFAULT; + + @Config(key = "port", defaultValue = "9886", + description = "Port used for the server2server replication server", + tags = {DATANODE, MANAGEMENT}) private int port; public int getPort() { @@ -144,6 +169,25 @@ public ReplicationConfig setPort(int portParam) { this.port = portParam; return this; } + + public int getReplicationMaxStreams() { + return replicationMaxStreams; + } + + public void setReplicationMaxStreams(int replicationMaxStreams) { + this.replicationMaxStreams = replicationMaxStreams; + } + + @PostConstruct + public void validate() { + if (replicationMaxStreams < 1) { + LOG.warn(REPLICATION_STREAMS_LIMIT_KEY + " must be greater than zero " + + "and was set to {}. Defaulting to {}", + replicationMaxStreams, REPLICATION_MAX_STREAMS_DEFAULT); + replicationMaxStreams = REPLICATION_MAX_STREAMS_DEFAULT; + } + } + } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java index 05a4173eb78..4cb826c6ec7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; import org.apache.hadoop.ozone.container.replication.ReplicationTask.Status; import com.google.common.annotations.VisibleForTesting; @@ -71,6 +72,13 @@ public class ReplicationSupervisor { this.context = context; } + public ReplicationSupervisor( + ContainerSet containerSet, StateContext context, + ContainerReplicator replicator, ReplicationConfig replicationConfig) { + this(containerSet, context, replicator, + replicationConfig.getReplicationMaxStreams()); + } + public ReplicationSupervisor( ContainerSet containerSet, StateContext context, ContainerReplicator replicator, int poolSize) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java index e14a391dcb1..fc9b44924ab 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/stream/DirstreamClientHandler.java @@ -116,7 +116,7 @@ public void doRead(ChannelHandlerContext ctx, ByteBuf buffer) } } - public boolean isAtTheEnd(){ + public boolean isAtTheEnd() { return getCurrentFileName().equals(END_MARKER); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java index 
9ff4b0aa3db..f25e13c285e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/DataNodeUpgradeFinalizer.java @@ -46,7 +46,7 @@ public DataNodeUpgradeFinalizer(HDDSLayoutVersionManager versionManager) { @Override public void preFinalizeUpgrade(DatanodeStateMachine dsm) throws IOException { - if(!canFinalizeDataNode(dsm)) { + if (!canFinalizeDataNode(dsm)) { // DataNode is not yet ready to finalize. // Reset the Finalization state. getVersionManager().setUpgradeState(FINALIZATION_REQUIRED); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java index ec8494604ac..3653e6c9fa7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java @@ -120,7 +120,7 @@ public static String chooseContainerPathID(ConfigurationSource conf, boolean scmHAEnabled = conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT); - if (isFinalized(HDDSLayoutFeature.SCM_HA) || scmHAEnabled){ + if (isFinalized(HDDSLayoutFeature.SCM_HA) || scmHAEnabled) { return clusterID; } else { return scmID; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java index e3ea4aeeaff..6aa0554e102 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/ReregisterCommand.java @@ -27,7 +27,7 @@ * Informs a datanode to register itself with SCM again. */ public class ReregisterCommand extends - SCMCommand{ + SCMCommand { /** * Returns the type of this command. 
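For orientation, the key functional change in the hunks above is the relocated replication limit: ReplicationServer.ReplicationConfig now owns the hdds.datanode.replication.streams.limit setting (default 10, with a @PostConstruct hook that falls back to the default for values below 1), and ReplicationSupervisor gained a constructor that takes this config directly. Below is a minimal sketch of how a datanode component might wire the two together, assuming the usual OzoneConfiguration.getObject(...) lookup; the class and method names in the sketch are placeholders, not part of the patch.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
import org.apache.hadoop.ozone.container.replication.ContainerReplicator;
import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig;
import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor;

/** Sketch only: size the replication supervisor from the relocated config. */
final class ReplicationWiringSketch {

  static ReplicationSupervisor newSupervisor(OzoneConfiguration conf,
      ContainerSet containerSet, StateContext context,
      ContainerReplicator replicator) {
    // Reads hdds.datanode.replication.streams.limit; the @PostConstruct
    // validate() hook resets non-positive values to the default (10).
    ReplicationConfig replicationConfig =
        conf.getObject(ReplicationConfig.class);
    // Constructor added by this patch: pool size comes from the config
    // instead of a raw int.
    return new ReplicationSupervisor(
        containerSet, context, replicator, replicationConfig);
  }

  private ReplicationWiringSketch() {
  }
}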
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java index 08ca4c91f5b..6deaddadc9c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java @@ -114,7 +114,7 @@ public static void tearDown() { } @Before - public void setUpDNCertClient(){ + public void setUpDNCertClient() { FileUtils.deleteQuietly(Paths.get( securityConfig.getKeyLocation(DN_COMPONENT).toString(), diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 15cd4d060c7..825432290dc 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -115,7 +115,7 @@ public static DatanodeDetails createDatanodeDetails() { } public static KeyValueContainer getContainer(long containerId, - ChunkLayOutVersion layout, + ContainerLayoutVersion layout, ContainerProtos.ContainerDataProto.State state) { KeyValueContainerData kvData = new KeyValueContainerData(containerId, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java index 157dee65ff8..eef66550df6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java @@ -131,9 +131,10 @@ public int getContainerReportsCount() { * @return - count of reported containers. */ public long getContainerCount() { - return nodeContainers.values().parallelStream().mapToLong((containerMap)->{ - return containerMap.size(); - }).sum(); + return nodeContainers.values().parallelStream().mapToLong( + (containerMap) -> { + return containerMap.size(); + }).sum(); } /** @@ -141,11 +142,13 @@ public long getContainerCount() { * @return - number of keys reported. */ public long getKeyCount() { - return nodeContainers.values().parallelStream().mapToLong((containerMap)->{ - return containerMap.values().parallelStream().mapToLong((container) -> { - return container.getKeyCount(); - }).sum(); - }).sum(); + return nodeContainers.values().parallelStream().mapToLong( + (containerMap) -> { + return containerMap.values().parallelStream().mapToLong( + (container) -> { + return container.getKeyCount(); + }).sum(); + }).sum(); } /** @@ -153,11 +156,13 @@ public long getKeyCount() { * @return - number of bytes used. 
*/ public long getBytesUsed() { - return nodeContainers.values().parallelStream().mapToLong((containerMap)->{ - return containerMap.values().parallelStream().mapToLong((container) -> { - return container.getUsed(); - }).sum(); - }).sum(); + return nodeContainers.values().parallelStream().mapToLong( + (containerMap) -> { + return containerMap.values().parallelStream().mapToLong( + (container) -> { + return container.getUsed(); + }).sum(); + }).sum(); } /** @@ -259,7 +264,7 @@ public void updateNodeReport(DatanodeDetailsProto datanodeDetailsProto, List storageReports = nodeReport.getStorageReportList(); - for(StorageReportProto report : storageReports) { + for (StorageReportProto report : storageReports) { nodeReportProto.addStorageReport(report); } @@ -313,7 +318,7 @@ public int getNodeReportsCount(DatanodeDetails datanodeDetails) { public int getContainerCountsForDatanode(DatanodeDetails datanodeDetails) { Map cr = nodeContainers.get(datanodeDetails); - if(cr != null) { + if (cr != null) { return cr.size(); } return 0; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 347961ad9ec..5306eb05d92 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -50,7 +50,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy; @@ -94,7 +94,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_VERSIONS; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V1; import static org.apache.hadoop.ozone.OzoneConsts.SCHEMA_V2; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; import static org.apache.hadoop.ozone.container.common.states.endpoint.VersionEndpointTask.LOG; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -113,7 +113,7 @@ public class TestBlockDeletingService { private static String datanodeUuid; private static MutableConfigurationSource conf; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; private final String schemaVersion; private int blockLimitPerInterval; private static VolumeSet volumeSet; @@ -134,16 +134,17 @@ public static Iterable parameters() { */ public static class LayoutInfo { private final String schemaVersion; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public LayoutInfo(String schemaVersion, ChunkLayOutVersion layout) { + public LayoutInfo(String schemaVersion, ContainerLayoutVersion layout) { this.schemaVersion = schemaVersion; this.layout = layout; } private static List 
layoutList = new ArrayList<>(); static { - for (ChunkLayOutVersion ch : ChunkLayOutVersion.getAllVersions()) { + for (ContainerLayoutVersion ch : + ContainerLayoutVersion.getAllVersions()) { for (String sch : SCHEMA_VERSIONS) { layoutList.add(new LayoutInfo(sch, ch)); } @@ -355,7 +356,8 @@ private void updateMetaData(KeyValueContainerData data, .put(OzoneConsts.PENDING_DELETE_BLOCK_COUNT, (long) numOfBlocksPerContainer); } catch (IOException exception) { - LOG.warn("Meta Data update was not successful for container: "+container); + LOG.warn("Meta Data update was not successful for container: " + + container); } } @@ -426,7 +428,7 @@ public void testBlockDeletion() throws Exception { KeyValueContainerData data = (KeyValueContainerData) containerData.get(0); Assert.assertEquals(1, containerData.size()); - try(ReferenceCountedDB meta = BlockUtils.getDB( + try (ReferenceCountedDB meta = BlockUtils.getDB( (KeyValueContainerData) containerData.get(0), conf)) { Map> containerMap = containerSet.getContainerMapCopy(); // NOTE: this test assumes that all the container is KetValueContainer and @@ -733,7 +735,7 @@ public void testBlockThrottle() throws Exception { // in all the containers are deleted)). deleteAndWait(service, 2); - long totalContainerBlocks = blocksPerContainer*containerCount; + long totalContainerBlocks = blocksPerContainer * containerCount; GenericTestUtils.waitFor(() -> totalContainerBlocks * blockSpace == (totalContainerSpace - diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java index 562775d263c..e55d68cbe34 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java @@ -177,7 +177,7 @@ public void testConcurrentDBGet() throws Exception { for (Future future: futureList) { try { future.get(); - } catch (InterruptedException| ExecutionException e) { + } catch (InterruptedException | ExecutionException e) { Assert.fail("Should get the DB instance"); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerLayoutVersion.java similarity index 73% rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java rename to hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerLayoutVersion.java index be6c6798b6d..fb5b4914fb5 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestChunkLayOutVersion.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerLayoutVersion.java @@ -18,21 +18,21 @@ package org.apache.hadoop.ozone.container.common; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.junit.Test; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK; +import static 
org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK; import static org.junit.Assert.assertEquals; /** - * This class tests ChunkLayOutVersion. + * This class tests ContainerLayoutVersion. */ -public class TestChunkLayOutVersion { +public class TestContainerLayoutVersion { @Test public void testVersionCount() { - assertEquals(2, ChunkLayOutVersion.getAllVersions().size()); + assertEquals(2, ContainerLayoutVersion.getAllVersions().size()); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java index 3814fdd2596..ab8bd834f7a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java @@ -20,8 +20,8 @@ import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; import org.junit.Test; @@ -41,15 +41,15 @@ public class TestKeyValueContainerData { private static final long MAXSIZE = (long) StorageUnit.GB.toBytes(5); - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestKeyValueContainerData(ChunkLayOutVersion layout) { + public TestKeyValueContainerData(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java index 700c6c2abef..c8bb93b26c3 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java @@ -130,7 +130,8 @@ public void setup() throws Exception { */ @Test public void testDirectTableIterationDisabled() throws Exception { - try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) { + try (ReferenceCountedDB refCountedDB = + BlockUtils.getDB(newKvData(), conf)) { DatanodeStore store = refCountedDB.getStore(); assertTableIteratorUnsupported(store.getMetadataTable()); @@ -158,7 +159,8 @@ private void assertTableIteratorUnsupported(Table table) { */ @Test public void testBlockIteration() throws IOException { - try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) { + try (ReferenceCountedDB refCountedDB = + BlockUtils.getDB(newKvData(), conf)) { 
assertEquals(TestDB.NUM_DELETED_BLOCKS, countDeletedBlocks(refCountedDB)); assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS, @@ -278,7 +280,8 @@ public void testDelete() throws Exception { final long expectedRegularBlocks = TestDB.KEY_COUNT - numBlocksToDelete; - try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) { + try (ReferenceCountedDB refCountedDB = + BlockUtils.getDB(newKvData(), conf)) { // Test results via block iteration. assertEquals(expectedDeletingBlocks, @@ -320,7 +323,8 @@ public void testReadDeletedBlockChunkInfo() throws Exception { new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> { }); - try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) { + try (ReferenceCountedDB refCountedDB = + BlockUtils.getDB(newKvData(), conf)) { // Read blocks that were already deleted before the upgrade. List> deletedBlocks = refCountedDB.getStore() @@ -328,13 +332,13 @@ public void testReadDeletedBlockChunkInfo() throws Exception { Set preUpgradeBlocks = new HashSet<>(); - for(Table.KeyValue chunkListKV: deletedBlocks) { + for (Table.KeyValue chunkListKV: deletedBlocks) { preUpgradeBlocks.add(chunkListKV.getKey()); try { chunkListKV.getValue(); Assert.fail("No exception thrown when trying to retrieve old " + "deleted blocks values as chunk lists."); - } catch(IOException ex) { + } catch (IOException ex) { // Exception thrown as expected. } } @@ -370,7 +374,8 @@ public void testReadDeletedBlockChunkInfo() throws Exception { @Test public void testReadBlockData() throws Exception { - try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) { + try (ReferenceCountedDB refCountedDB = + BlockUtils.getDB(newKvData(), conf)) { Table blockDataTable = refCountedDB.getStore().getBlockDataTable(); @@ -395,12 +400,12 @@ public void testReadBlockData() throws Exception { Assert.assertEquals(TestDB.BLOCK_IDS, decodedKeys); // Test reading blocks with block iterator. 
- try(BlockIterator iter = + try (BlockIterator iter = refCountedDB.getStore().getBlockIterator()) { List iteratorBlockIDs = new ArrayList<>(); - while(iter.hasNext()) { + while (iter.hasNext()) { long localID = iter.nextBlock().getBlockID().getLocalID(); iteratorBlockIDs.add(Long.toString(localID)); } @@ -412,7 +417,8 @@ public void testReadBlockData() throws Exception { @Test public void testReadDeletingBlockData() throws Exception { - try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) { + try (ReferenceCountedDB refCountedDB = + BlockUtils.getDB(newKvData(), conf)) { Table blockDataTable = refCountedDB.getStore().getBlockDataTable(); @@ -446,12 +452,12 @@ public void testReadDeletingBlockData() throws Exception { MetadataKeyFilters.KeyPrefixFilter filter = MetadataKeyFilters.getDeletingKeyFilter(); - try(BlockIterator iter = + try (BlockIterator iter = refCountedDB.getStore().getBlockIterator(filter)) { List iteratorBlockIDs = new ArrayList<>(); - while(iter.hasNext()) { + while (iter.hasNext()) { long localID = iter.nextBlock().getBlockID().getLocalID(); iteratorBlockIDs.add(Long.toString(localID)); } @@ -463,7 +469,8 @@ public void testReadDeletingBlockData() throws Exception { @Test public void testReadMetadata() throws Exception { - try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) { + try (ReferenceCountedDB refCountedDB = + BlockUtils.getDB(newKvData(), conf)) { Table metadataTable = refCountedDB.getStore().getMetadataTable(); @@ -479,7 +486,8 @@ public void testReadMetadata() throws Exception { @Test public void testReadDeletedBlocks() throws Exception { - try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) { + try (ReferenceCountedDB refCountedDB = + BlockUtils.getDB(newKvData(), conf)) { Table deletedBlocksTable = refCountedDB.getStore().getDeletedBlocksTable(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java index 00f68ef3dc7..85a8bda8a6e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestBlockData.java @@ -60,11 +60,11 @@ public void testAddAndRemove() { assertChunks(expected, computed); long offset = 0; int n = 5; - for(int i = 0; i < n; i++) { + for (int i = 0; i < n; i++) { offset += assertAddChunk(expected, computed, offset); } - for(; !expected.isEmpty();) { + for (; !expected.isEmpty();) { removeChunk(expected, computed); } } @@ -125,7 +125,7 @@ public void testSetChunks() { assertChunks(expected, computed); long offset = 0; int n = 5; - for(int i = 0; i < n; i++) { + for (int i = 0; i < n; i++) { offset += addChunk(expected, offset).getLen(); LOG.info("setChunk: {}", toString(expected)); computed.setChunks(expected); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java index 84f50087e1f..1ca9f9e8570 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java +++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/helpers/TestDatanodeVersionFile.java @@ -50,7 +50,7 @@ public class TestDatanodeVersionFile { private int lv; @Rule - public TemporaryFolder folder= new TemporaryFolder(); + public TemporaryFolder folder = new TemporaryFolder(); @Before public void setup() throws IOException { @@ -70,7 +70,7 @@ public void setup() throws IOException { } @Test - public void testCreateAndReadVersionFile() throws IOException{ + public void testCreateAndReadVersionFile() throws IOException { //Check VersionFile exists assertTrue(versionFile.exists()); @@ -88,7 +88,7 @@ public void testCreateAndReadVersionFile() throws IOException{ } @Test - public void testIncorrectClusterId() throws IOException{ + public void testIncorrectClusterId() throws IOException { try { String randomClusterID = UUID.randomUUID().toString(); HddsVolumeUtil.getClusterID(properties, versionFile, @@ -100,7 +100,7 @@ public void testIncorrectClusterId() throws IOException{ } @Test - public void testVerifyCTime() throws IOException{ + public void testVerifyCTime() throws IOException { long invalidCTime = -10; dnVersionFile = new DatanodeVersionFile( storageID, clusterID, datanodeUUID, invalidCTime, lv); @@ -117,7 +117,7 @@ public void testVerifyCTime() throws IOException{ } @Test - public void testVerifyLayOut() throws IOException{ + public void testVerifyLayOut() throws IOException { int invalidLayOutVersion = 100; dnVersionFile = new DatanodeVersionFile( storageID, clusterID, datanodeUUID, cTime, invalidLayOutVersion); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java index 4dc38e9a254..0bfdb173a47 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; import org.apache.ozone.test.GenericTestUtils; @@ -39,7 +39,7 @@ import java.time.Instant; import java.util.UUID; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -61,16 +61,16 @@ public class TestContainerDataYaml { private static final String VOLUME_OWNER = "hdfs"; private static final String CONTAINER_DB_TYPE = "RocksDB"; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; private OzoneConfiguration conf = new OzoneConfiguration(); - public TestContainerDataYaml(ChunkLayOutVersion layout) { + public TestContainerDataYaml(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public 
static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } /** @@ -126,7 +126,7 @@ public void testCreateContainerFile() throws IOException { assertEquals(containerFile.getParent(), kvData.getChunksPath()); assertEquals(ContainerProtos.ContainerDataProto.State.OPEN, kvData .getState()); - assertEquals(layout, kvData.getLayOutVersion()); + assertEquals(layout, kvData.getLayoutVersion()); assertEquals(0, kvData.getMetadata().size()); assertEquals(MAXSIZE, kvData.getMaxSize()); assertEquals(MAXSIZE, kvData.getMaxSize()); @@ -160,7 +160,7 @@ public void testCreateContainerFile() throws IOException { assertEquals(containerFile.getParent(), kvData.getChunksPath()); assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, kvData .getState()); - assertEquals(layout, kvData.getLayOutVersion()); + assertEquals(layout, kvData.getLayoutVersion()); assertEquals(2, kvData.getMetadata().size()); assertEquals(VOLUME_OWNER, kvData.getMetadata().get(OzoneConsts.VOLUME)); assertEquals(OzoneConsts.OZONE, @@ -174,7 +174,7 @@ public void testCreateContainerFile() throws IOException { } @Test - public void testIncorrectContainerFile() throws IOException{ + public void testIncorrectContainerFile() throws IOException { try { String containerFile = "incorrect.container"; //Get file from resources folder @@ -217,7 +217,7 @@ public void testCheckBackWardCompatibilityOfContainerFile() throws .getChunksPath()); assertEquals("/hdds/current/aed-fg4-hji-jkl/containerDir0/1", kvData .getMetadataPath()); - assertEquals(FILE_PER_CHUNK, kvData.getLayOutVersion()); + assertEquals(FILE_PER_CHUNK, kvData.getLayoutVersion()); assertEquals(2, kvData.getMetadata().size()); } catch (Exception ex) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java index 14f46d944f7..92ebbcacc1e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService; @@ -63,15 +63,15 @@ public class TestContainerDeletionChoosingPolicy { private static final int SERVICE_TIMEOUT_IN_MILLISECONDS = 0; private static final int SERVICE_INTERVAL_IN_MILLISECONDS = 1000; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestContainerDeletionChoosingPolicy(ChunkLayOutVersion layout) { + public TestContainerDeletionChoosingPolicy(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return 
ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @Before diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index 5e80a7edfda..7fbd7546c03 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -55,7 +55,7 @@ import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; @@ -115,15 +115,15 @@ public class TestContainerPersistence { @Rule public Timeout testTimeout = Timeout.seconds(300); - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestContainerPersistence(ChunkLayOutVersion layout) { + public TestContainerPersistence(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @BeforeClass diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java index f2af230a56b..d51d78e4adb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.ozone.test.GenericTestUtils; @@ -61,15 +61,15 @@ public class TestContainerSet { private static final int FIRST_ID = 2; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestContainerSet(ChunkLayOutVersion layout) { + public TestContainerSet(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @Test @@ -122,11 +122,11 @@ public void testIteratorsAndCount() throws StorageContainerException { Iterator> iterator = containerSet.getContainerIterator(); int count = 0; - while(iterator.hasNext()) { + while 
(iterator.hasNext()) { Container kv = iterator.next(); ContainerData containerData = kv.getContainerData(); long containerId = containerData.getContainerID(); - if (containerId%2 == 0) { + if (containerId % 2 == 0) { assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, containerData.getState()); } else { @@ -146,7 +146,7 @@ public void testIteratorsAndCount() throws StorageContainerException { Container kv = containerMapIterator.next().getValue(); ContainerData containerData = kv.getContainerData(); long containerId = containerData.getContainerID(); - if (containerId%2 == 0) { + if (containerId % 2 == 0) { assertEquals(ContainerProtos.ContainerDataProto.State.CLOSED, containerData.getState()); } else { @@ -167,12 +167,12 @@ public void testIteratorPerVolume() throws StorageContainerException { Mockito.when(vol2.getStorageID()).thenReturn("uuid-2"); ContainerSet containerSet = new ContainerSet(); - for (int i=0; i<10; i++) { + for (int i = 0; i < 10; i++) { KeyValueContainerData kvData = new KeyValueContainerData(i, layout, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), UUID.randomUUID().toString()); - if (i%2 == 0) { + if (i % 2 == 0) { kvData.setVolume(vol1); } else { kvData.setVolume(vol2); @@ -307,7 +307,7 @@ private ContainerSet createContainerSet() throws StorageContainerException { layout, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), UUID.randomUUID().toString()); - if (i%2 == 0) { + if (i % 2 == 0) { kvData.setState(ContainerProtos.ContainerDataProto.State.CLOSED); } else { kvData.setState(ContainerProtos.ContainerDataProto.State.OPEN); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java index 618dd629138..9b8da361b2d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java @@ -50,7 +50,7 @@ import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.ozone.test.GenericTestUtils; @@ -83,17 +83,17 @@ public class TestHddsDispatcher { public static final Consumer NO_OP_ICR_SENDER = - c -> {}; + c -> { }; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestHddsDispatcher(ChunkLayOutVersion layout) { + public TestHddsDispatcher(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java index f969148a160..2b1bc3d248f 100644 --- 
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java @@ -82,7 +82,7 @@ public void setup() throws Exception { } @After - public void tearDown(){ + public void tearDown() { ContainerMetrics.remove(); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java index 83e44d3adf8..f2770d2941f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java @@ -198,7 +198,7 @@ public void testCRLStatusReportPublisher() throws IOException { GeneratedMessage report = ((CRLStatusReportPublisher) publisher).getReport(); Assert.assertNotNull(report); - for(Descriptors.FieldDescriptor descriptor : + for (Descriptors.FieldDescriptor descriptor : report.getDescriptorForType().getFields()) { if (descriptor.getNumber() == CRLStatusReport.RECEIVEDCRLID_FIELD_NUMBER) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java index 1b4265476a5..5f1b0a63200 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestDatanodeConfiguration.java @@ -28,8 +28,6 @@ import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.DISK_CHECK_MIN_GAP_DEFAULT; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.DISK_CHECK_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.DISK_CHECK_TIMEOUT_KEY; -import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.REPLICATION_MAX_STREAMS_DEFAULT; -import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.REPLICATION_STREAMS_LIMIT_KEY; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.FAILED_DATA_VOLUMES_TOLERATED_KEY; @@ -46,14 +44,12 @@ public class TestDatanodeConfiguration { @Test public void acceptsValidValues() { // GIVEN - int validReplicationLimit = 123; int validDeleteThreads = 42; long validDiskCheckIntervalMinutes = 60; int validFailedVolumesTolerated = 10; long validDiskCheckMinGap = 2; long validDiskCheckTimeout = 1; OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(REPLICATION_STREAMS_LIMIT_KEY, validReplicationLimit); conf.setInt(CONTAINER_DELETE_THREADS_MAX_KEY, validDeleteThreads); conf.setLong(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY, validDiskCheckIntervalMinutes); @@ -70,7 +66,6 @@ public void acceptsValidValues() 
{ DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class); // THEN - assertEquals(validReplicationLimit, subject.getReplicationMaxStreams()); assertEquals(validDeleteThreads, subject.getContainerDeleteThreads()); assertEquals(validDiskCheckIntervalMinutes, subject.getPeriodicDiskCheckIntervalMinutes()); @@ -87,14 +82,12 @@ public void acceptsValidValues() { @Test public void overridesInvalidValues() { // GIVEN - int invalidReplicationLimit = -5; int invalidDeleteThreads = 0; long invalidDiskCheckIntervalMinutes = -1; int invalidFailedVolumesTolerated = -2; long invalidDiskCheckMinGap = -1; long invalidDiskCheckTimeout = -1; OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(REPLICATION_STREAMS_LIMIT_KEY, invalidReplicationLimit); conf.setInt(CONTAINER_DELETE_THREADS_MAX_KEY, invalidDeleteThreads); conf.setLong(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY, invalidDiskCheckIntervalMinutes); @@ -111,8 +104,6 @@ public void overridesInvalidValues() { DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class); // THEN - assertEquals(REPLICATION_MAX_STREAMS_DEFAULT, - subject.getReplicationMaxStreams()); assertEquals(CONTAINER_DELETE_THREADS_DEFAULT, subject.getContainerDeleteThreads()); assertEquals(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT, @@ -136,8 +127,6 @@ public void isCreatedWitDefaultValues() { DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class); // THEN - assertEquals(REPLICATION_MAX_STREAMS_DEFAULT, - subject.getReplicationMaxStreams()); assertEquals(CONTAINER_DELETE_THREADS_DEFAULT, subject.getContainerDeleteThreads()); assertEquals(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java index 7e1ea760356..de9968128e4 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerCommandHandler.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.Handler; @@ -28,7 +28,7 @@ .DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; @@ -72,15 +72,15 @@ public class TestCloseContainerCommandHandler { private 
CloseContainerCommandHandler subject = new CloseContainerCommandHandler(); - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestCloseContainerCommandHandler(ChunkLayOutVersion layout) { + public TestCloseContainerCommandHandler(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @Before @@ -214,7 +214,7 @@ public void closeNonExistenceContainer() { } catch (IOException e) { GenericTestUtils.assertExceptionContains("The Container " + - "is not found. ContainerID: "+containerID, e); + "is not found. ContainerID: " + containerID, e); } } @@ -227,7 +227,7 @@ public void closeMissingContainer() { } catch (IOException e) { GenericTestUtils.assertExceptionContains("The Container is in " + "the MissingContainerSet hence we can't close it. " + - "ContainerID: "+containerID, e); + "ContainerID: " + containerID, e); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java index 990d4c95bf3..dfe7cb314b8 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java @@ -114,7 +114,7 @@ public void testRRPolicyExceptionMessage() throws Exception { try { policy.chooseVolume(volumes, blockSize); Assert.fail("expected to throw DiskOutOfSpaceException"); - } catch(DiskOutOfSpaceException e) { + } catch (DiskOutOfSpaceException e) { Assert.assertEquals("Not returning the expected message", "Out of space: The volume with the most available space (=" + 200 + " B) is less than the container size (=" + blockSize + " B).", diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java index 55b4c39b5dd..72faf570aea 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestStorageVolumeChecker.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; import org.apache.hadoop.ozone.container.common.ContainerTestUtils; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; @@ -98,10 +98,10 @@ public class TestStorageVolumeChecker { */ private final VolumeCheckResult expectedVolumeHealth; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; public TestStorageVolumeChecker(VolumeCheckResult result, - ChunkLayOutVersion layout) { + 
ContainerLayoutVersion layout) { this.expectedVolumeHealth = result; this.layout = layout; } @@ -127,7 +127,7 @@ public void cleanup() throws IOException { @Parameters public static Collection data() { List values = new ArrayList<>(); - for (ChunkLayOutVersion layout : ChunkLayOutVersion.values()) { + for (ContainerLayoutVersion layout : ContainerLayoutVersion.values()) { for (VolumeCheckResult result : VolumeCheckResult.values()) { values.add(new Object[]{result, layout}); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java index 52bf3d32005..f0869c9c6f9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java @@ -225,7 +225,7 @@ public void testShutdown() throws Exception { } @Test - public void testFailVolumes() throws Exception{ + public void testFailVolumes() throws Exception { MutableVolumeSet volSet = null; File readOnlyVolumePath = new File(baseDir); //Set to readonly, so that this volume will be failed diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ChunkLayoutTestInfo.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java similarity index 84% rename from hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ChunkLayoutTestInfo.java rename to hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java index 34e31b766e1..0adaa09bd83 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ChunkLayoutTestInfo.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.keyvalue; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerDummyImpl; import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy; import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy; @@ -29,7 +29,7 @@ import static java.util.stream.Collectors.toList; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_LAYOUT_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -37,7 +37,7 @@ /** * Interface of parameters for testing different chunk layout implementations. 
*/ -public enum ChunkLayoutTestInfo { +public enum ContainerLayoutTestInfo { DUMMY { @Override @@ -51,7 +51,7 @@ public void validateFileCount(File dir, long blockCount, long chunkCount) { } @Override - public ChunkLayOutVersion getLayout() { + public ContainerLayoutVersion getLayout() { return null; } @@ -73,8 +73,8 @@ public void validateFileCount(File dir, long blockCount, long chunkCount) { } @Override - public ChunkLayOutVersion getLayout() { - return ChunkLayOutVersion.FILE_PER_CHUNK; + public ContainerLayoutVersion getLayout() { + return ContainerLayoutVersion.FILE_PER_CHUNK; } }, @@ -90,8 +90,8 @@ public void validateFileCount(File dir, long blockCount, long chunkCount) { } @Override - public ChunkLayOutVersion getLayout() { - return ChunkLayOutVersion.FILE_PER_BLOCK; + public ContainerLayoutVersion getLayout() { + return ContainerLayoutVersion.FILE_PER_BLOCK; } }; @@ -101,10 +101,10 @@ public abstract ChunkManager createChunkManager(boolean sync, public abstract void validateFileCount(File dir, long blockCount, long chunkCount); - public abstract ChunkLayOutVersion getLayout(); + public abstract ContainerLayoutVersion getLayout(); public void updateConfig(OzoneConfiguration config) { - config.set(OZONE_SCM_CHUNK_LAYOUT_KEY, getLayout().name()); + config.set(OZONE_SCM_CONTAINER_LAYOUT_KEY, getLayout().name()); } private static void assertFileCount(File dir, long count) { @@ -116,8 +116,8 @@ private static void assertFileCount(File dir, long count) { assertEquals(count, files.length); } - public static Iterable chunkLayoutParameters() { - return ChunkLayOutVersion.getAllVersions().stream() + public static Iterable containerLayoutParameters() { + return ContainerLayoutVersion.getAllVersions().stream() .map(each -> new Object[] {each}) .collect(toList()); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index 264c6bbf1cb..fbf39f70284 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -32,7 +32,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; @@ -42,8 +42,8 @@ import org.apache.ozone.test.GenericTestUtils; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK; import org.junit.After; import static org.junit.Assert.assertEquals; @@ -68,9 +68,9 @@ public class 
TestKeyValueBlockIterator { private OzoneConfiguration conf; private File testRoot; private ReferenceCountedDB db; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestKeyValueBlockIterator(ChunkLayOutVersion layout) { + public TestKeyValueBlockIterator(ContainerLayoutVersion layout) { this.layout = layout; } @@ -120,7 +120,7 @@ public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception { // Default filter used is all unprefixed blocks. List unprefixedBlockIDs = blockIDs.get(""); - try(BlockIterator keyValueBlockIterator = + try (BlockIterator keyValueBlockIterator = db.getStore().getBlockIterator()) { Iterator blockIDIter = unprefixedBlockIDs.iterator(); @@ -152,7 +152,7 @@ public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception { @Test public void testKeyValueBlockIteratorWithNextBlock() throws Exception { List blockIDs = createContainerWithBlocks(CONTAINER_ID, 2); - try(BlockIterator keyValueBlockIterator = + try (BlockIterator keyValueBlockIterator = db.getStore().getBlockIterator()) { assertEquals((long)blockIDs.get(0), keyValueBlockIterator.nextBlock().getLocalID()); @@ -171,7 +171,7 @@ public void testKeyValueBlockIteratorWithNextBlock() throws Exception { @Test public void testKeyValueBlockIteratorWithHasNext() throws Exception { List blockIDs = createContainerWithBlocks(CONTAINER_ID, 2); - try(BlockIterator blockIter = + try (BlockIterator blockIter = db.getStore().getBlockIterator()) { // Even calling multiple times hasNext() should not move entry forward. @@ -209,7 +209,7 @@ public void testKeyValueBlockIteratorWithFilter() throws Exception { int deletingBlocks = 5; Map> blockIDs = createContainerWithBlocks(CONTAINER_ID, normalBlocks, deletingBlocks); - try(BlockIterator keyValueBlockIterator = + try (BlockIterator keyValueBlockIterator = db.getStore().getBlockIterator( MetadataKeyFilters.getDeletingKeyFilter())) { List deletingBlockIDs = @@ -230,7 +230,7 @@ public void testKeyValueBlockIteratorWithFilter() throws Exception { public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws Exception { createContainerWithBlocks(CONTAINER_ID, 0, 5); - try(BlockIterator keyValueBlockIterator = + try (BlockIterator keyValueBlockIterator = db.getStore().getBlockIterator()) { //As all blocks are deleted blocks, blocks does not match with normal key // filter. @@ -288,7 +288,7 @@ public void testKeyValueBlockIteratorWithAdvancedFilter() throws */ private void testWithFilter(MetadataKeyFilters.KeyPrefixFilter filter, List expectedIDs) throws Exception { - try(BlockIterator iterator = + try (BlockIterator iterator = db.getStore().getBlockIterator(filter)) { // Test seek. iterator.seekToFirst(); @@ -364,7 +364,7 @@ private Map> createContainerWithBlocks(long containerId, Map prefixCounts) throws Exception { // Create required block data. 
Map> blockIDs = new HashMap<>(); - try(ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, + try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, conf)) { List chunkList = new ArrayList<>(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 68cd2f6fb5c..16165725773 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.utils.db.DatanodeDBProfile; @@ -101,20 +101,20 @@ public class TestKeyValueContainer { private KeyValueContainer keyValueContainer; private UUID datanodeId; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; // Use one configuration object across parameterized runs of tests. // This preserves the column family options in the container options // cache for testContainersShareColumnFamilyOptions. private static final OzoneConfiguration CONF = new OzoneConfiguration(); - public TestKeyValueContainer(ChunkLayOutVersion layout) { + public TestKeyValueContainer(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @Before @@ -179,7 +179,7 @@ public void testContainerImportExport() throws Exception { //create a new one KeyValueContainerData containerData = new KeyValueContainerData(containerId, - keyValueContainerData.getLayOutVersion(), + keyValueContainerData.getLayoutVersion(), keyValueContainerData.getMaxSize(), UUID.randomUUID().toString(), datanodeId.toString()); KeyValueContainer container = new KeyValueContainer(containerData, CONF); @@ -200,8 +200,8 @@ public void testContainerImportExport() throws Exception { containerData.getState()); assertEquals(numberOfKeysToWrite, containerData.getKeyCount()); - assertEquals(keyValueContainerData.getLayOutVersion(), - containerData.getLayOutVersion()); + assertEquals(keyValueContainerData.getLayoutVersion(), + containerData.getLayoutVersion()); assertEquals(keyValueContainerData.getMaxSize(), containerData.getMaxSize()); assertEquals(keyValueContainerData.getBytesUsed(), @@ -221,7 +221,7 @@ public void testContainerImportExport() throws Exception { //Import failure should cleanup the container directory containerData = new KeyValueContainerData(containerId + 1, - keyValueContainerData.getLayOutVersion(), + keyValueContainerData.getLayoutVersion(), keyValueContainerData.getMaxSize(), UUID.randomUUID().toString(), datanodeId.toString()); container = new KeyValueContainer(containerData, CONF); @@ -446,7 +446,7 @@ public void testContainerRocksDB() keyValueContainerData, CONF); 
keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId); - try(ReferenceCountedDB db = + try (ReferenceCountedDB db = BlockUtils.getDB(keyValueContainerData, CONF)) { RDBStore store = (RDBStore) db.getStore().getStore(); long defaultCacheSize = 64 * OzoneConsts.MB; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index 71175b6144b..d50a091a6a4 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -74,7 +74,7 @@ private static final Logger LOG = LoggerFactory.getLogger(TestKeyValueContainerCheck.class); - private final ChunkLayoutTestInfo chunkManagerTestInfo; + private final ContainerLayoutTestInfo chunkManagerTestInfo; private KeyValueContainer container; private KeyValueContainerData containerData; private MutableVolumeSet volumeSet; @@ -82,14 +82,15 @@ private File testRoot; private ChunkManager chunkManager; - public TestKeyValueContainerCheck(ChunkLayoutTestInfo chunkManagerTestInfo) { + public TestKeyValueContainerCheck( + ContainerLayoutTestInfo chunkManagerTestInfo) { this.chunkManagerTestInfo = chunkManagerTestInfo; } @Parameterized.Parameters public static Collection data() { return Arrays.asList(new Object[][] { - {ChunkLayoutTestInfo.FILE_PER_CHUNK}, - {ChunkLayoutTestInfo.FILE_PER_BLOCK} + {ContainerLayoutTestInfo.FILE_PER_CHUNK}, + {ContainerLayoutTestInfo.FILE_PER_BLOCK} }); } @@ -185,7 +186,7 @@ public void testKeyValueContainerCheckCorruption() throws Exception { try (RandomAccessFile file = new RandomAccessFile(chunkFile, "rws")) { file.setLength(length / 2); } - assertEquals(length/2, chunkFile.length()); + assertEquals(length / 2, chunkFile.length()); } // metadata check should pass. 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java index e1526dbd956..d7b520aba67 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerMarkUnhealthy.java @@ -21,7 +21,7 @@ import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; @@ -79,15 +79,15 @@ public class TestKeyValueContainerMarkUnhealthy { private KeyValueContainer keyValueContainer; private UUID datanodeId; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestKeyValueContainerMarkUnhealthy(ChunkLayOutVersion layout) { + public TestKeyValueContainerMarkUnhealthy(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @Before diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 583d043e84b..a5d225469db 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.security.token.TokenVerifier; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; @@ -82,18 +82,18 @@ public class TestKeyValueHandler { private static final long DUMMY_CONTAINER_ID = 9999; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; private HddsDispatcher dispatcher; private KeyValueHandler handler; - public TestKeyValueHandler(ChunkLayOutVersion layout) { + public TestKeyValueHandler(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @Before @@ -261,7 +261,7 @@ public void testHandlerCommandHandling() throws Exception { } @Test - public 
void testVolumeSetInKeyValueHandler() throws Exception{ + public void testVolumeSetInKeyValueHandler() throws Exception { File path = GenericTestUtils.getRandomizedTestDir(); OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath()); @@ -296,7 +296,7 @@ public void testVolumeSetInKeyValueHandler() throws Exception{ try { new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), - cset, volumeSet, metrics, c->{}); + cset, volumeSet, metrics, c -> { }); } catch (RuntimeException ex) { GenericTestUtils.assertExceptionContains("class org.apache.hadoop" + ".ozone.container.common.impl.HddsDispatcher not org.apache" + diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java index d2b0f5e87c8..793aea5122a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java @@ -36,7 +36,7 @@ import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; import org.apache.commons.compress.compressors.CompressorOutputStream; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker; import org.apache.commons.compress.archivers.tar.TarArchiveEntry; @@ -87,15 +87,15 @@ public class TestTarContainerPacker { private static final AtomicInteger CONTAINER_ID = new AtomicInteger(1); - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestTarContainerPacker(ChunkLayOutVersion layout) { + public TestTarContainerPacker(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @BeforeClass diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java index c1ab19fee80..bc6371e4005 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/AbstractTestChunkManager.java @@ -21,14 +21,14 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import 
org.apache.hadoop.ozone.container.common.volume.VolumeIOStats; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; @@ -68,7 +68,7 @@ public abstract class AbstractTestChunkManager { @Rule public TemporaryFolder folder = new TemporaryFolder(); - protected abstract ChunkLayoutTestInfo getStrategy(); + protected abstract ContainerLayoutTestInfo getStrategy(); protected ChunkManager createTestSubject() { blockManager = new BlockManagerImpl(new OzoneConfiguration()); @@ -92,7 +92,7 @@ public final void setUp() throws Exception { .thenReturn(hddsVolume); keyValueContainerData = new KeyValueContainerData(1L, - ChunkLayOutVersion.getConfiguredVersion(config), + ContainerLayoutVersion.getConfiguredVersion(config), (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java index 23f690eed5d..defc02e78ec 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java @@ -214,7 +214,7 @@ public void testWriteAndReadChunkMultipleTimes() throws Exception { BlockData blockData = new BlockData(blockID); // WHEN - for (int i = 0; i< count; i++) { + for (int i = 0; i < count; i++) { ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", localID, i), i * len, len); chunkManager.writeChunk(container, blockID, info, data, context); @@ -228,7 +228,7 @@ public void testWriteAndReadChunkMultipleTimes() throws Exception { assertTrue(getHddsVolume().getVolumeIOStats().getWriteTime() > 0); // WHEN - for (int i = 0; i< count; i++) { + for (int i = 0; i < count; i++) { ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", localID, i), i * len, len); chunkManager.readChunk(container, blockID, info, context); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java index 34455a097a3..77eae5642a7 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestBlockManagerImpl.java @@ -26,12 +26,12 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import 
org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.ozone.test.GenericTestUtils; @@ -75,15 +75,15 @@ public class TestBlockManagerImpl { private BlockID blockID; private BlockID blockID1; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestBlockManagerImpl(ChunkLayOutVersion layout) { + public TestBlockManagerImpl(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @Before diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java index d882ba4f9e4..e4bbe5073ea 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java @@ -20,7 +20,7 @@ import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.junit.Test; @@ -32,8 +32,8 @@ public class TestChunkManagerDummyImpl extends AbstractTestChunkManager { @Override - protected ChunkLayoutTestInfo getStrategy() { - return ChunkLayoutTestInfo.DUMMY; + protected ContainerLayoutTestInfo getStrategy() { + return ContainerLayoutTestInfo.DUMMY; } @Test diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java index f3be6e2fb3b..c3fc33fafb2 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.junit.Test; @@ -139,7 +139,7 @@ public void testPartialRead() throws Exception { } @Override - protected ChunkLayoutTestInfo getStrategy() { - return ChunkLayoutTestInfo.FILE_PER_BLOCK; + protected 
ContainerLayoutTestInfo getStrategy() { + return ContainerLayoutTestInfo.FILE_PER_BLOCK; } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java index 0286b3582d1..54812700f6d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerChunkStrategy.java @@ -22,9 +22,9 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; @@ -42,8 +42,8 @@ public class TestFilePerChunkStrategy extends CommonChunkManagerTestCases { @Override - protected ChunkLayoutTestInfo getStrategy() { - return ChunkLayoutTestInfo.FILE_PER_CHUNK; + protected ContainerLayoutTestInfo getStrategy() { + return ContainerLayoutTestInfo.FILE_PER_CHUNK; } @Test @@ -66,7 +66,7 @@ public void testWriteChunkStageWriteAndCommit() throws Exception { long term = 0; long index = 0; - File chunkFile = ChunkLayOutVersion.FILE_PER_CHUNK + File chunkFile = ContainerLayoutVersion.FILE_PER_CHUNK .getChunkFile(container.getContainerData(), blockID, chunkInfo); File tempChunkFile = new File(chunkFile.getParent(), chunkFile.getName() + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER @@ -109,7 +109,7 @@ public void deletesChunkFileWithLengthIncludingOffset() throws Exception { ChunkInfo oldDatanodeChunkInfo = new ChunkInfo(chunkInfo.getChunkName(), offset, chunkInfo.getLen()); - File file = ChunkLayOutVersion.FILE_PER_CHUNK.getChunkFile( + File file = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile( container.getContainerData(), blockID, chunkInfo); ChunkUtils.writeData(file, ChunkBuffer.wrap(getData()), offset, chunkInfo.getLen(), diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java index 2cd96730085..674ae2dacef 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import 
org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; @@ -94,9 +94,9 @@ public void setup() throws Exception { Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())) .thenReturn(hddsVolume); - for (int i=0; i<2; i++) { + for (int i = 0; i < 2; i++) { KeyValueContainerData keyValueContainerData = new KeyValueContainerData(i, - ChunkLayOutVersion.FILE_PER_BLOCK, + ContainerLayoutVersion.FILE_PER_BLOCK, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString()); @@ -124,7 +124,7 @@ public void setup() throws Exception { private void markBlocksForDelete(KeyValueContainer keyValueContainer, boolean setMetaData, List blockNames, int count) throws Exception { - try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer + try (ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer .getContainerData(), conf)) { for (int i = 0; i < count; i++) { @@ -154,7 +154,7 @@ private List addBlocks(KeyValueContainer keyValueContainer, long containerId = keyValueContainer.getContainerData().getContainerID(); List blkNames = new ArrayList<>(); - try(ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer + try (ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer .getContainerData(), conf)) { for (int i = 0; i < blockCount; i++) { @@ -197,7 +197,7 @@ public void testContainerReader() throws Exception { Assert.assertEquals(2, containerSet.containerCount()); - for (int i=0; i < 2; i++) { + for (int i = 0; i < 2; i++) { Container keyValueContainer = containerSet.getContainer(i); KeyValueContainerData keyValueContainerData = (KeyValueContainerData) @@ -235,7 +235,7 @@ public void testContainerReaderWithLoadException() throws Exception { int containerCount = 3; for (int i = 0; i < containerCount; i++) { KeyValueContainerData keyValueContainerData = new KeyValueContainerData(i, - ChunkLayOutVersion.FILE_PER_BLOCK, + ContainerLayoutVersion.FILE_PER_BLOCK, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString()); KeyValueContainer keyValueContainer = @@ -285,7 +285,7 @@ public void testMultipleContainerReader() throws Exception { blockCount = containerCount; for (int i = 0; i < containerCount; i++) { KeyValueContainerData keyValueContainerData = - new KeyValueContainerData(i, ChunkLayOutVersion.FILE_PER_BLOCK, + new KeyValueContainerData(i, ContainerLayoutVersion.FILE_PER_BLOCK, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 1aa2940ceb1..8e1458dc479 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import 
org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -43,7 +43,7 @@ import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; @@ -87,15 +87,15 @@ public class TestOzoneContainer { private HashMap commitSpaceMap; //RootDir -> committed space private final int numTestContainers = 10; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestOzoneContainer(ChunkLayOutVersion layout) { + public TestOzoneContainer(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @Before diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java index cf6ece3b456..099ca9c2987 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestGrpcOutputStream.java @@ -126,18 +126,18 @@ public void seriesOfArraysExactlyFillBuffer() throws IOException { public void bufferFlushedWhenFull() throws IOException { byte[] bytes = getRandomBytes(bufferSize); - subject.write(bytes, 0, bufferSize-1); - subject.write(bytes[bufferSize-1]); + subject.write(bytes, 0, bufferSize - 1); + subject.write(bytes[bufferSize - 1]); verify(observer).onNext(any()); subject.write(bytes[0]); - subject.write(bytes, 1, bufferSize-1); + subject.write(bytes, 1, bufferSize - 1); verify(observer, times(2)).onNext(any()); } @Test public void singleArraySpansMultipleResponses() throws IOException { - byte[] bytes = writeBytes(subject, 2 * bufferSize + bufferSize/2); + byte[] bytes = writeBytes(subject, 2 * bufferSize + bufferSize / 2); subject.close(); verifyResponses(bytes); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationConfig.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationConfig.java new file mode 100644 index 00000000000..6ab32d6cf9b --- /dev/null +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationConfig.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.container.replication; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig; +import org.junit.Test; + +import static org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig.REPLICATION_MAX_STREAMS_DEFAULT; +import static org.apache.hadoop.ozone.container.replication.ReplicationServer.ReplicationConfig.REPLICATION_STREAMS_LIMIT_KEY; +import static org.junit.Assert.assertEquals; + +/** + * Tests for {@link ReplicationConfig}. + */ +public class TestReplicationConfig { + + @Test + public void acceptsValidValues() { + // GIVEN + int validReplicationLimit = 123; + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(REPLICATION_STREAMS_LIMIT_KEY, validReplicationLimit); + + // WHEN + ReplicationConfig subject = conf.getObject(ReplicationConfig.class); + + // THEN + assertEquals(validReplicationLimit, subject.getReplicationMaxStreams()); + } + + @Test + public void overridesInvalidValues() { + // GIVEN + int invalidReplicationLimit = -5; + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(REPLICATION_STREAMS_LIMIT_KEY, invalidReplicationLimit); + + // WHEN + ReplicationConfig subject = conf.getObject(ReplicationConfig.class); + + // THEN + assertEquals(REPLICATION_MAX_STREAMS_DEFAULT, + subject.getReplicationMaxStreams()); + } + + @Test + public void isCreatedWitDefaultValues() { + // GIVEN + OzoneConfiguration conf = new OzoneConfiguration(); + + // WHEN + ReplicationConfig subject = conf.getObject(ReplicationConfig.class); + + // THEN + assertEquals(REPLICATION_MAX_STREAMS_DEFAULT, + subject.getReplicationMaxStreams()); + } + +} diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java index c6dc3c6c1b5..8078fc25c89 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/TestReplicationSupervisor.java @@ -32,9 +32,9 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; @@ -58,7 +58,7 @@ @RunWith(Parameterized.class) public class TestReplicationSupervisor { - private final ContainerReplicator noopReplicator = task -> {}; + private final ContainerReplicator noopReplicator = task -> { }; private final ContainerReplicator throwingReplicator = task -> { throw new 
RuntimeException("testing replication failure"); }; @@ -75,15 +75,15 @@ public class TestReplicationSupervisor { private ContainerSet set; - private final ChunkLayOutVersion layout; + private final ContainerLayoutVersion layout; - public TestReplicationSupervisor(ChunkLayOutVersion layout) { + public TestReplicationSupervisor(ContainerLayoutVersion layout) { this.layout = layout; } @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } @Before diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java index ebcdfee551c..33395406438 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDataNodeStartupSlvLessThanMlv.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.upgrade.TestUpgradeUtils; +import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; import org.junit.Rule; @@ -61,7 +61,7 @@ public void testStartupSlvLessThanMlv() throws Exception { // Create version file with MLV > SLV, which should fail the // DataNodeStateMachine construction. - TestUpgradeUtils.createVersionFile(datanodeSubdir, + UpgradeTestUtils.createVersionFile(datanodeSubdir, HddsProtos.NodeType.DATANODE, mlv); try { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java index cb5257d5ea8..d882ca4ed4b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java @@ -144,7 +144,7 @@ public void testReadsDuringFinalization() throws Exception { ExecutorService executor = Executors.newFixedThreadPool(1); Future readFuture = executor.submit(() -> { // Layout version check should be thread safe. - while(!dsm.getLayoutVersionManager() + while (!dsm.getLayoutVersionManager() .isAllowed(HDDSLayoutFeature.SCM_HA)) { readChunk(writeChunk, pipeline); } @@ -203,7 +203,7 @@ public void testImportContainer() throws Exception { ExecutorService executor = Executors.newFixedThreadPool(1); Future importFuture = executor.submit(() -> { // Layout version check should be thread safe. - while(!dsm.getLayoutVersionManager() + while (!dsm.getLayoutVersionManager() .isAllowed(HDDSLayoutFeature.SCM_HA)) { importContainer(exportContainerID, exportedContainerFile); readChunk(exportWriteChunk, pipeline); @@ -541,7 +541,7 @@ public void restartDatanode(int expectedMlv) * Get the cluster ID and SCM ID from SCM to the datanode. 
*/ public void callVersionEndpointTask() throws Exception { - try(EndpointStateMachine esm = ContainerTestUtils.createEndpoint(conf, + try (EndpointStateMachine esm = ContainerTestUtils.createEndpoint(conf, address, 1000)) { VersionEndpointTask vet = new VersionEndpointTask(esm, conf, dsm.getContainer()); diff --git a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml index f4bfcef323e..2e27982d5ea 100644 --- a/hadoop-hdds/dev-support/checkstyle/checkstyle.xml +++ b/hadoop-hdds/dev-support/checkstyle/checkstyle.xml @@ -152,6 +152,7 @@ + diff --git a/hadoop-hdds/docs/content/feature/Observability.md b/hadoop-hdds/docs/content/feature/Observability.md index cab68780912..1ee95d8ade9 100644 --- a/hadoop-hdds/docs/content/feature/Observability.md +++ b/hadoop-hdds/docs/content/feature/Observability.md @@ -69,7 +69,7 @@ Tracing is turned off by default, but can be turned on with `hdds.tracing.enable ``` -Jager client can be configured with environment variables as documented [here](https://github.com/jaegertracing/jaeger-client-java/blob/master/jaeger-core/README.md): +Jaeger client can be configured with environment variables as documented [here](https://github.com/jaegertracing/jaeger-client-java/blob/master/jaeger-core/README.md): For example: diff --git a/hadoop-hdds/docs/content/feature/Observability.zh.md b/hadoop-hdds/docs/content/feature/Observability.zh.md new file mode 100644 index 00000000000..7a5c67b4cdd --- /dev/null +++ b/hadoop-hdds/docs/content/feature/Observability.zh.md @@ -0,0 +1,217 @@ +--- +title: "可观察性" +weight: 8 +menu: +main: +parent: 特性 +summary: Ozone 的不同工具来提高可观察性 +--- + + +Ozone 提供了多种工具来获取有关集群当前状态的更多信息。 + +## Prometheus +Ozone 原生支持 Prometheus 集成。所有内部指标(由 Hadoop 指标框架收集)都发布在 `/prom` 的 HTTP 端点下。(例如,在 SCM 的 http://localhost:9876/prom)。 + +Prometheus 端点默认是打开的,但可以通过`hdds.prometheus.endpoint.enabled`配置变量把它关闭。 + +在安全环境中,该页面是用 SPNEGO 认证来保护的,但 Prometheus 不支持这种认证。为了在安全环境中启用监控,可以配置一个特定的认证令牌。 + +`ozone-site.xml` 配置示例: + +```XML + + hdds.prometheus.endpoint.token + putyourtokenhere + +``` + +prometheus 配置示例: +```YAML +scrape_configs: + - job_name: ozone + bearer_token: + metrics_path: /prom + static_configs: + - targets: + - "127.0.0.1:9876" +``` + +## 分布式跟踪 +分布式跟踪可以通过可视化端到端的性能来帮助了解性能瓶颈。 + +Ozone 使用 [jaeger](https://jaegertracing.io) 跟踪库收集跟踪,可以将跟踪数据发送到任何兼容的后端(Zipkin,…)。 + +默认情况下,跟踪功能是关闭的,可以通过 `ozon-site.xml` 的 `hdds.tracing.enabled` 配置变量打开。 + +```XML + + hdds.tracing.enabled + true + +``` + +Jaeger 客户端可以用环境变量进行配置,如[这份](https://github.com/jaegertracing/jaeger-client-java/blob/master/jaeger-core/README.md)文档所述。 + +例如: + +```shell +JAEGER_SAMPLER_PARAM=0.01 +JAEGER_SAMPLER_TYPE=probabilistic +JAEGER_AGENT_HOST=jaeger +``` + +此配置将记录1%的请求,以限制性能开销。有关 Jaeger 抽样的更多信息,请查看[文档](https://www.jaegertracing.io/docs/1.18/sampling/#client-sampling-configuration)。 + +## Ozone Insight +Ozone Insight 是一个用于检查 Ozone 集群当前状态的工具,它可以显示特定组件的日志记录、指标和配置。 + +请使用`ozone insight list`命令检查可用的组件: + +```shell +> ozone insight list + +Available insight points: + + scm.node-manager SCM Datanode management related information. 
+ scm.replica-manager SCM closed container replication manager + scm.event-queue Information about the internal async event delivery + scm.protocol.block-location SCM Block location protocol endpoint + scm.protocol.container-location SCM Container location protocol endpoint + scm.protocol.security SCM Block location protocol endpoint + om.key-manager OM Key Manager + om.protocol.client Ozone Manager RPC endpoint + datanode.pipeline More information about one ratis datanode ring. +``` + +## 配置 + +`ozone insight config` 可以显示与特定组件有关的配置(只支持选定的组件)。 + +```shell +> ozone insight config scm.replica-manager + +Configuration for `scm.replica-manager` (SCM closed container replication manager) + +>>> hdds.scm.replication.thread.interval + default: 300s + current: 300s + +There is a replication monitor thread running inside SCM which takes care of replicating the containers in the cluster. This property is used to configure the interval in which that thread runs. + + +>>> hdds.scm.replication.event.timeout + default: 30m + current: 30m + +Timeout for the container replication/deletion commands sent to datanodes. After this timeout the command will be retried. + +``` + +## 指标 +`ozone insight metrics` 可以显示与特定组件相关的指标(只支持选定的组件)。 +```shell +> ozone insight metrics scm.protocol.block-location +Metrics for `scm.protocol.block-location` (SCM Block location protocol endpoint) + +RPC connections + + Open connections: 0 + Dropped connections: 0 + Received bytes: 1267 + Sent bytes: 2420 + + +RPC queue + + RPC average queue time: 0.0 + RPC call queue length: 0 + + +RPC performance + + RPC processing time average: 0.0 + Number of slow calls: 0 + + +Message type counters + + Number of AllocateScmBlock: ??? + Number of DeleteScmKeyBlocks: ??? + Number of GetScmInfo: ??? + Number of SortDatanodes: ??? +``` + +## 日志 + +`ozone insights logs` 可以连接到所需的服务并显示与一个特定组件相关的DEBUG/TRACE日志。例如,显示RPC消息: + +```shell +>ozone insight logs om.protocol.client + +[OM] 2020-07-28 12:31:49,988 [DEBUG|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] OzoneProtocol ServiceList request is received +[OM] 2020-07-28 12:31:50,095 [DEBUG|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] OzoneProtocol CreateVolume request is received +``` + +使用 `-v` 标志,也可以显示 protobuf 信息的内容(TRACE级别的日志): + +```shell +ozone insight logs -v om.protocol.client + +[OM] 2020-07-28 12:33:28,463 [TRACE|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] [service=OzoneProtocol] [type=CreateVolume] request is received: +cmdType: CreateVolume +traceID: "" +clientId: "client-A31DF5C6ECF2" +createVolumeRequest { + volumeInfo { + adminName: "hadoop" + ownerName: "hadoop" + volume: "vol1" + quotaInBytes: 1152921504606846976 + volumeAcls { + type: USER + name: "hadoop" + rights: "200" + aclScope: ACCESS + } + volumeAcls { + type: GROUP + name: "users" + rights: "200" + aclScope: ACCESS + } + creationTime: 1595939608460 + objectID: 0 + updateID: 0 + modificationTime: 0 + } +} + +[OM] 2020-07-28 12:33:28,474 [TRACE|org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB|OzoneProtocolMessageDispatcher] [service=OzoneProtocol] [type=CreateVolume] request is processed. Response: +cmdType: CreateVolume +traceID: "" +success: false +message: "Volume already exists" +status: VOLUME_ALREADY_EXISTS +``` + +

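The token-protected Prometheus endpoint described in the added Observability page can also be exercised by hand as a quick sanity check. A minimal sketch, assuming SCM serves metrics on the default port 9876 and that the token is presented as a standard bearer token (which is how the page's sample Prometheus `bearer_token` setting would send it); `putyourtokenhere` is the placeholder value from the page, not a real token:

```bash
# Fetch SCM metrics from the /prom endpoint using the placeholder token
# configured under hdds.prometheus.endpoint.token in ozone-site.xml.
curl -H "Authorization: Bearer putyourtokenhere" http://localhost:9876/prom
```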
\ No newline at end of file diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.md b/hadoop-hdds/docs/content/security/SecuringTDE.md index c85da0f73fb..2a9898427c2 100644 --- a/hadoop-hdds/docs/content/security/SecuringTDE.md +++ b/hadoop-hdds/docs/content/security/SecuringTDE.md @@ -58,10 +58,10 @@ To create an encrypted bucket, client need to: * Assign the encryption key to a bucket. ```bash - ozone sh bucket create -k encKey /vol/encryptedBucket + ozone sh bucket create -k encKey /vol/encryptedbucket ``` -After this command, all data written to the _encryptedBucket_ will be encrypted +After this command, all data written to the _encryptedbucket_ will be encrypted via the encKey and while reading the clients will talk to Key Management Server and read the key and decrypt it. In other words, the data stored inside Ozone is always encrypted. The fact that data is encrypted at rest @@ -71,20 +71,47 @@ will be completely transparent to the clients and end users. There are two ways to create an encrypted bucket that can be accessed via S3 Gateway. -####1. Create a bucket using shell under "/s3v" volume +#### Option 1. Create a bucket using shell under "/s3v" volume ```bash - ozone sh bucket create -k encKey /s3v/encryptedBucket + ozone sh bucket create -k encKey --layout=OBJECT_STORE /s3v/encryptedbucket ``` -####2. Create a link to an encrypted bucket under "/s3v" volume + +#### Option 2. Create a link to an encrypted bucket under "/s3v" volume ```bash - ozone sh bucket create -k encKey /vol/encryptedBucket - ozone sh bucket link /vol/encryptedBucket /s3v/linkencryptedbucket + ozone sh bucket create -k encKey --layout=OBJECT_STORE /vol/encryptedbucket + ozone sh bucket link /vol/encryptedbucket /s3v/linkencryptedbucket ``` -Note: An encrypted bucket cannot be created via S3 APIs. It must be done using Ozone shell commands as shown above. + +Note 1: An encrypted bucket cannot be created via S3 APIs. It must be done using Ozone shell commands as shown above. After creating an encrypted bucket, all the keys added to this bucket using s3g will be encrypted. +Note 2: `--layout=OBJECT_STORE` is specified in the above examples +for full compatibility with S3 (which is the default value for the `--layout` +argument, but explicitly added here to make a point). + +Bucket created with the `OBJECT_STORE` type will NOT be accessible via +HCFS (ofs or o3fs) at all. And such access will be rejected. For instance: + + ```bash + $ ozone fs -ls ofs://ozone1/s3v/encryptedbucket/ + -ls: Bucket: encryptedbucket has layout: OBJECT_STORE, which does not support file system semantics. Bucket Layout must be FILE_SYSTEM_OPTIMIZED or LEGACY. + ``` + + ```bash + $ ozone fs -ls o3fs://encryptedbucket.s3v.ozone1/ + 22/02/07 00:00:00 WARN fs.FileSystem: Failed to initialize fileystem o3fs://encryptedbucket.s3v.ozone1/: java.lang.IllegalArgumentException: Bucket: encryptedbucket has layout: OBJECT_STORE, which does not support file system semantics. Bucket Layout must be FILE_SYSTEM_OPTIMIZED or LEGACY. + -ls: Bucket: encryptedbucket has layout: OBJECT_STORE, which does not support file system semantics. Bucket Layout must be FILE_SYSTEM_OPTIMIZED or LEGACY. + ``` + +If one wants the bucket to be accessible from both S3G and HCFS (ofs and o3fs) +at the same time, use `--layout=FILE_SYSTEM_OPTIMIZED` instead. + +However, in buckets with `FILE_SYSTEM_OPTIMIZED` layout, some irregular S3 key +names may be rejected or normalized, which can be undesired. 
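For the dual-access case above (S3 Gateway plus ofs/o3fs), only the layout flag changes relative to the earlier examples. A minimal sketch, reusing the placeholder volume, bucket, key, and service-id names already shown on this page:

```bash
# Encrypted bucket under /s3v that stays usable from both S3 and HCFS (ofs/o3fs).
ozone sh bucket create -k encKey --layout=FILE_SYSTEM_OPTIMIZED /s3v/encryptedbucket

# With FILE_SYSTEM_OPTIMIZED layout the same bucket can be listed via ofs.
ozone fs -ls ofs://ozone1/s3v/encryptedbucket/
```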
+See [Prefix based File System Optimization]({{< relref "../feature/PrefixFSO.md" >}}) for more information. + In non-secure mode, the user running the S3Gateway daemon process is the proxy user, while in secure mode the S3Gateway Kerberos principal (ozone.s3g.kerberos.principal) is the proxy user. S3Gateway proxy's all the users accessing the encrypted buckets to decrypt the key. @@ -111,12 +138,11 @@ The below two configurations must be added to the kms-site.xml to allow the S3Ga This is the host where the S3Gateway is running. Set this to '*' to allow requests from any hosts to be proxied. - - ``` -###KMS Authorization +### KMS Authorization + If Ranger authorization is enabled for KMS, then decrypt key permission should be given to access key id user(currently access key is kerberos principal) to decrypt the encrypted key to read/write a key in the encrypted bucket. diff --git a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md index bda5e76114a..d7a2911cbd6 100644 --- a/hadoop-hdds/docs/content/security/SecuringTDE.zh.md +++ b/hadoop-hdds/docs/content/security/SecuringTDE.zh.md @@ -49,7 +49,7 @@ hadoop.security.key.provider.path | KMS uri.
比如 kms://http@kms-host:96 * 将加密密钥分配给桶 ```bash - ozone sh bucket create -k encKey /vol/encryptedBucket + ozone sh bucket create -k encKey /vol/encryptedbucket ``` -这条命令执行后,所以写往 _encryptedBucket_ 的数据都会用 encKey 进行加密,当读取里面的数据时,客户端通过 KMS 获取密钥进行解密。换句话说,Ozone 中存储的数据一直是加密的,但用户和客户端对此完全无感知。 +这条命令执行后,所以写往 _encryptedbucket_ 的数据都会用 encKey 进行加密,当读取里面的数据时,客户端通过 KMS 获取密钥进行解密。换句话说,Ozone 中存储的数据一直是加密的,但用户和客户端对此完全无感知。 diff --git a/hadoop-hdds/docs/content/tools/TestTools.md b/hadoop-hdds/docs/content/tools/TestTools.md index ac025f0a321..83b40cb5f3d 100644 --- a/hadoop-hdds/docs/content/tools/TestTools.md +++ b/hadoop-hdds/docs/content/tools/TestTools.md @@ -106,131 +106,4 @@ Average Time spent in key write: 00:00:10,894 Total bytes written: 10240000 Total Execution time: 00:00:16,898 *********************** -``` - -## Genesis - -Genesis is a microbenchmarking tool. It's also included in the distribution (`ozone genesis`) but it doesn't require real cluster. It measures different part of the code in an isolated way (eg. the code which saves the data to the local RocksDB based key value stores) - -Example run: - -``` - ozone genesis -benchmark=BenchMarkRocksDbStore -# JMH version: 1.19 -# VM version: JDK 11.0.1, VM 11.0.1+13-LTS -# VM invoker: /usr/lib/jvm/java-11-openjdk-11.0.1.13-3.el7_6.x86_64/bin/java -# VM options: -Dproc_genesis -Djava.net.preferIPv4Stack=true -Dhadoop.log.dir=/var/log/hadoop -Dhadoop.log.file=hadoop.log -Dhadoop.home.dir=/opt/hadoop -Dhadoop.id.str=hadoop -Dhadoop.root.logger=INFO,console -Dhadoop.policy.file=hadoop-policy.xml -Dhadoop.security.logger=INFO,NullAppender -# Warmup: 2 iterations, 1 s each -# Measurement: 20 iterations, 1 s each -# Timeout: 10 min per iteration -# Threads: 4 threads, will synchronize iterations -# Benchmark mode: Throughput, ops/time -# Benchmark: org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test -# Parameters: (backgroundThreads = 4, blockSize = 8, maxBackgroundFlushes = 4, maxBytesForLevelBase = 512, maxOpenFiles = 5000, maxWriteBufferNumber = 16, writeBufferSize = 64) - -# Run progress: 0.00% complete, ETA 00:00:22 -# Fork: 1 of 1 -# Warmup Iteration 1: 213775.360 ops/s -# Warmup Iteration 2: 32041.633 ops/s -Iteration 1: 196342.348 ops/s - ?stack: - -Iteration 2: 41926.816 ops/s - ?stack: - -Iteration 3: 210433.231 ops/s - ?stack: - -Iteration 4: 46941.951 ops/s - ?stack: - -Iteration 5: 212825.884 ops/s - ?stack: - -Iteration 6: 145914.351 ops/s - ?stack: - -Iteration 7: 141838.469 ops/s - ?stack: - -Iteration 8: 205334.438 ops/s - ?stack: - -Iteration 9: 163709.519 ops/s - ?stack: - -Iteration 10: 162494.608 ops/s - ?stack: - -Iteration 11: 199155.793 ops/s - ?stack: - -Iteration 12: 209679.298 ops/s - ?stack: - -Iteration 13: 193787.574 ops/s - ?stack: - -Iteration 14: 127004.147 ops/s - ?stack: - -Iteration 15: 145511.080 ops/s - ?stack: - -Iteration 16: 223433.864 ops/s - ?stack: - -Iteration 17: 169752.665 ops/s - ?stack: - -Iteration 18: 165217.191 ops/s - ?stack: - -Iteration 19: 191038.476 ops/s - ?stack: - -Iteration 20: 196335.579 ops/s - ?stack: - - - -Result "org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test": - 167433.864 ?(99.9%) 43530.883 ops/s [Average] - (min, avg, max) = (41926.816, 167433.864, 223433.864), stdev = 50130.230 - CI (99.9%): [123902.981, 210964.748] (assumes normal distribution) - -Secondary result "org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test:?stack": -Stack profiler: - -....[Thread state distributions].................................................................... 
- 78.9% RUNNABLE - 20.0% TIMED_WAITING - 1.1% WAITING - -....[Thread state: RUNNABLE]........................................................................ - 59.8% 75.8% org.rocksdb.RocksDB.put - 16.5% 20.9% org.rocksdb.RocksDB.get - 0.7% 0.9% java.io.UnixFileSystem.delete0 - 0.7% 0.9% org.rocksdb.RocksDB.disposeInternal - 0.3% 0.4% java.lang.Long.formatUnsignedLong0 - 0.1% 0.2% org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test - 0.1% 0.1% java.lang.Long.toUnsignedString0 - 0.1% 0.1% org.apache.hadoop.ozone.genesis.generated.BenchMarkRocksDbStore_test_jmhTest.test_thrpt_jmhStub - 0.0% 0.1% java.lang.Object.clone - 0.0% 0.0% java.lang.Thread.currentThread - 0.4% 0.5% - -....[Thread state: TIMED_WAITING]................................................................... - 20.0% 100.0% java.lang.Object.wait - -....[Thread state: WAITING]......................................................................... - 1.1% 100.0% jdk.internal.misc.Unsafe.park - - - -# Run complete. Total time: 00:00:38 - -Benchmark (backgroundThreads) (blockSize) (maxBackgroundFlushes) (maxBytesForLevelBase) (maxOpenFiles) (maxWriteBufferNumber) (writeBufferSize) Mode Cnt Score Error Units -BenchMarkRocksDbStore.test 4 8 4 512 5000 16 64 thrpt 20 167433.864 ? 43530.883 ops/s -BenchMarkRocksDbStore.test:?stack 4 8 4 512 5000 16 64 thrpt NaN --- -``` +``` \ No newline at end of file diff --git a/hadoop-hdds/docs/content/tools/TestTools.zh.md b/hadoop-hdds/docs/content/tools/TestTools.zh.md index c6dfd2cf616..df02389c8a0 100644 --- a/hadoop-hdds/docs/content/tools/TestTools.zh.md +++ b/hadoop-hdds/docs/content/tools/TestTools.zh.md @@ -107,131 +107,4 @@ Average Time spent in key write: 00:00:10,894 Total bytes written: 10240000 Total Execution time: 00:00:16,898 *********************** -``` - -## Genesis - -Genesis 是一个微型的基准测试工具,它也包含在发行包中(`ozone genesis`),但是它不需要一个真实的集群,而是采用一种隔离的方法测试不同部分的代码(比如,将数据存储到本地基于 RocksDB 的键值存储中)。 - -运行示例: - -``` - ozone genesis -benchmark=BenchMarkRocksDbStore -# JMH version: 1.19 -# VM version: JDK 11.0.1, VM 11.0.1+13-LTS -# VM invoker: /usr/lib/jvm/java-11-openjdk-11.0.1.13-3.el7_6.x86_64/bin/java -# VM options: -Dproc_genesis -Djava.net.preferIPv4Stack=true -Dhadoop.log.dir=/var/log/hadoop -Dhadoop.log.file=hadoop.log -Dhadoop.home.dir=/opt/hadoop -Dhadoop.id.str=hadoop -Dhadoop.root.logger=INFO,console -Dhadoop.policy.file=hadoop-policy.xml -Dhadoop.security.logger=INFO,NullAppender -# Warmup: 2 iterations, 1 s each -# Measurement: 20 iterations, 1 s each -# Timeout: 10 min per iteration -# Threads: 4 threads, will synchronize iterations -# Benchmark mode: Throughput, ops/time -# Benchmark: org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test -# Parameters: (backgroundThreads = 4, blockSize = 8, maxBackgroundFlushes = 4, maxBytesForLevelBase = 512, maxOpenFiles = 5000, maxWriteBufferNumber = 16, writeBufferSize = 64) - -# Run progress: 0.00% complete, ETA 00:00:22 -# Fork: 1 of 1 -# Warmup Iteration 1: 213775.360 ops/s -# Warmup Iteration 2: 32041.633 ops/s -Iteration 1: 196342.348 ops/s - ?stack: - -Iteration 2: 41926.816 ops/s - ?stack: - -Iteration 3: 210433.231 ops/s - ?stack: - -Iteration 4: 46941.951 ops/s - ?stack: - -Iteration 5: 212825.884 ops/s - ?stack: - -Iteration 6: 145914.351 ops/s - ?stack: - -Iteration 7: 141838.469 ops/s - ?stack: - -Iteration 8: 205334.438 ops/s - ?stack: - -Iteration 9: 163709.519 ops/s - ?stack: - -Iteration 10: 162494.608 ops/s - ?stack: - -Iteration 11: 199155.793 ops/s - ?stack: - -Iteration 12: 209679.298 ops/s - ?stack: - -Iteration 
13: 193787.574 ops/s - ?stack: - -Iteration 14: 127004.147 ops/s - ?stack: - -Iteration 15: 145511.080 ops/s - ?stack: - -Iteration 16: 223433.864 ops/s - ?stack: - -Iteration 17: 169752.665 ops/s - ?stack: - -Iteration 18: 165217.191 ops/s - ?stack: - -Iteration 19: 191038.476 ops/s - ?stack: - -Iteration 20: 196335.579 ops/s - ?stack: - - - -Result "org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test": - 167433.864 ?(99.9%) 43530.883 ops/s [Average] - (min, avg, max) = (41926.816, 167433.864, 223433.864), stdev = 50130.230 - CI (99.9%): [123902.981, 210964.748] (assumes normal distribution) - -Secondary result "org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test:?stack": -Stack profiler: - -....[Thread state distributions].................................................................... - 78.9% RUNNABLE - 20.0% TIMED_WAITING - 1.1% WAITING - -....[Thread state: RUNNABLE]........................................................................ - 59.8% 75.8% org.rocksdb.RocksDB.put - 16.5% 20.9% org.rocksdb.RocksDB.get - 0.7% 0.9% java.io.UnixFileSystem.delete0 - 0.7% 0.9% org.rocksdb.RocksDB.disposeInternal - 0.3% 0.4% java.lang.Long.formatUnsignedLong0 - 0.1% 0.2% org.apache.hadoop.ozone.genesis.BenchMarkRocksDbStore.test - 0.1% 0.1% java.lang.Long.toUnsignedString0 - 0.1% 0.1% org.apache.hadoop.ozone.genesis.generated.BenchMarkRocksDbStore_test_jmhTest.test_thrpt_jmhStub - 0.0% 0.1% java.lang.Object.clone - 0.0% 0.0% java.lang.Thread.currentThread - 0.4% 0.5% - -....[Thread state: TIMED_WAITING]................................................................... - 20.0% 100.0% java.lang.Object.wait - -....[Thread state: WAITING]......................................................................... - 1.1% 100.0% jdk.internal.misc.Unsafe.park - - - -# Run complete. Total time: 00:00:38 - -Benchmark (backgroundThreads) (blockSize) (maxBackgroundFlushes) (maxBytesForLevelBase) (maxOpenFiles) (maxWriteBufferNumber) (writeBufferSize) Mode Cnt Score Error Units -BenchMarkRocksDbStore.test 4 8 4 512 5000 16 64 thrpt 20 167433.864 ? 43530.883 ops/s -BenchMarkRocksDbStore.test:?stack 4 8 4 512 5000 16 64 thrpt NaN --- -``` +``` \ No newline at end of file diff --git a/hadoop-hdds/docs/content/tools/_index.md b/hadoop-hdds/docs/content/tools/_index.md index 090ba357b4b..12dd7f4faa1 100644 --- a/hadoop-hdds/docs/content/tools/_index.md +++ b/hadoop-hdds/docs/content/tools/_index.md @@ -62,6 +62,5 @@ Admin commands: Test tools: * **freon** - Runs the ozone load generator. - * **genesis** - Developer Only, Ozone micro-benchmark application. 
For more information see the following subpages: \ No newline at end of file diff --git a/hadoop-hdds/docs/content/tools/_index.zh.md b/hadoop-hdds/docs/content/tools/_index.zh.md index 43f4587e474..a8e91427193 100644 --- a/hadoop-hdds/docs/content/tools/_index.zh.md +++ b/hadoop-hdds/docs/content/tools/_index.zh.md @@ -57,6 +57,5 @@ Ozone 有一系列管理 Ozone 的命令行工具。 测试工具: * **freon** - 运行 Ozone 负载生成器。 - * **genesis** - Ozone 的 benchmark 应用,仅供开发者使用。 更多信息请参见下面的子页面: \ No newline at end of file diff --git a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html b/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html index 07c55b31649..1f558d9c604 100644 --- a/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html +++ b/hadoop-hdds/docs/themes/ozonedoc/layouts/shortcodes/image.html @@ -16,4 +16,4 @@ --> - \ No newline at end of file + \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java index d89ecc68de3..14e63a1b303 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/protocolPB/SCMSecurityProtocolClientSideTranslatorPB.java @@ -366,7 +366,7 @@ public long revokeCertificates(List certIds, int reason, .setReason(Reason.valueOf(reason)) .setRevokeTime(revocationTime).build(); return submitRequest(Type.RevokeCertificates, - builder->builder.setRevokeCertificatesRequest(req)) + builder -> builder.setRevokeCertificatesRequest(req)) .getRevokeCertificatesResponseProto().getCrlId(); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index c484420ba04..77ef3f09a49 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineBatchRequestProto; @@ -52,6 +53,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetExistContainerWithPipelinesInBatchRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineRequestProto; import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto; @@ -61,6 +64,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.RecommissionNodesRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.RecommissionNodesResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerReportRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerReportResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerBalancerStatusRequestProto; @@ -84,6 +89,7 @@ import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; @@ -249,6 +255,26 @@ public ContainerWithPipeline getContainerWithPipeline(long containerID) } + /** + * {@inheritDoc} + */ + @Override + public List + getContainerReplicas(long containerID) throws IOException { + Preconditions.checkState(containerID >= 0, + "Container ID cannot be negative"); + + GetContainerReplicasRequestProto request = + GetContainerReplicasRequestProto.newBuilder() + .setTraceID(TracingUtil.exportCurrentSpan()) + .setContainerID(containerID).build(); + + ScmContainerLocationResponse response = + submitRequest(Type.GetContainerReplicas, + (builder) -> builder.setGetContainerReplicasRequest(request)); + return response.getGetContainerReplicasResponse().getContainerReplicaList(); + } + /** * {@inheritDoc} */ @@ -306,7 +332,7 @@ public List getExistContainerWithPipelinesInBatch( response = submitRequest(Type.GetExistContainerWithPipelinesInBatch, (builder) -> builder .setGetExistContainerWithPipelinesInBatchRequest(request)); - } catch (IOException ex){ + } catch (IOException ex) { return cps; } @@ -735,13 +761,27 @@ public boolean getReplicationManagerStatus() throws IOException { } + @Override + public ReplicationManagerReport getReplicationManagerReport() + throws IOException { + ReplicationManagerReportRequestProto request = + ReplicationManagerReportRequestProto.newBuilder() + .setTraceID(TracingUtil.exportCurrentSpan()) + .build(); + ReplicationManagerReportResponseProto response = + 
submitRequest(Type.GetReplicationManagerReport, + builder -> builder.setReplicationManagerReportRequest(request)) + .getGetReplicationManagerReportResponse(); + return ReplicationManagerReport.fromProtobuf(response.getReport()); + } + @Override public boolean startContainerBalancer( - Optional threshold, Optional idleiterations, - Optional maxDatanodesRatioToInvolvePerIteration, + Optional threshold, Optional iterations, + Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, - Optional maxSizeLeavingSourceInGB) throws IOException{ + Optional maxSizeLeavingSourceInGB) throws IOException { StartContainerBalancerRequestProto.Builder builder = StartContainerBalancerRequestProto.newBuilder(); builder.setTraceID(TracingUtil.exportCurrentSpan()); @@ -749,8 +789,8 @@ public boolean startContainerBalancer( //make balancer configuration optional if (threshold.isPresent()) { double tsd = threshold.get(); - Preconditions.checkState(tsd >= 0.0D && tsd < 1.0D, - "threshold should to be specified in range [0.0, 1.0)."); + Preconditions.checkState(tsd >= 0.0D && tsd < 100D, + "threshold should be specified in range [0.0, 100.0)."); builder.setThreshold(tsd); } if (maxSizeToMovePerIterationInGB.isPresent()) { @@ -759,22 +799,22 @@ public boolean startContainerBalancer( "maxSizeToMovePerIterationInGB must be positive."); builder.setMaxSizeToMovePerIterationInGB(mstm); } - if (maxDatanodesRatioToInvolvePerIteration.isPresent()) { - double mdti = maxDatanodesRatioToInvolvePerIteration.get(); + if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) { + int mdti = maxDatanodesPercentageToInvolvePerIteration.get(); Preconditions.checkState(mdti >= 0, - "maxDatanodesRatioToInvolvePerIteration must be " + + "maxDatanodesPercentageToInvolvePerIteration must be " + "greater than equal to zero."); - Preconditions.checkState(mdti <= 1, - "maxDatanodesRatioToInvolvePerIteration must be " + - "lesser than equal to one."); - builder.setMaxDatanodesRatioToInvolvePerIteration(mdti); + Preconditions.checkState(mdti <= 100, + "maxDatanodesPercentageToInvolvePerIteration must be " + + "lesser than equal to hundred."); + builder.setMaxDatanodesPercentageToInvolvePerIteration(mdti); } - if (idleiterations.isPresent()) { - int idi = idleiterations.get(); - Preconditions.checkState(idi > 0 || idi == -1, - "idleiterations must be positive or" + - " -1(infinitly run container balancer)."); - builder.setIdleiterations(idi); + if (iterations.isPresent()) { + int i = iterations.get(); + Preconditions.checkState(i > 0 || i == -1, + "number of iterations must be positive or" + + " -1 (for running container balancer infinitely)."); + builder.setIterations(i); } if (maxSizeEnteringTargetInGB.isPresent()) { @@ -931,6 +971,18 @@ public Token getContainerToken( return OzonePBHelper.tokenFromProto(response.getToken()); } + @Override + public long getContainerCount() throws IOException { + GetContainerCountRequestProto request = + GetContainerCountRequestProto.newBuilder().build(); + + GetContainerCountResponseProto response = + submitRequest(Type.GetContainerCount, + builder -> builder.setGetContainerCountRequest(request)) + .getGetContainerCountResponse(); + return response.getContainerCount(); + } + @Override public Object getUnderlyingProxyObject() { return rpcProxy; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java 
b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java index 6d544819d92..72da5194c36 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/CRLClientUpdateHandler.java @@ -68,7 +68,7 @@ public class CRLClientUpdateHandler implements ClientUpdateHandler { this.clientStore = serviceGrpcClient.getClientCRLStore(); this.crlCheckInterval = crlCheckInterval; - LOG.info("Pending CRL check interval : {}s", crlCheckInterval/1000); + LOG.info("Pending CRL check interval : {}s", crlCheckInterval / 1000); this.executorService = Executors.newSingleThreadScheduledExecutor( new ThreadFactoryBuilder().setDaemon(true) .setNameFormat("CRLUpdateHandler Thread - %d").build()); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java index 721988ec7a0..5e326ccfea5 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/ClientCRLStore.java @@ -76,7 +76,7 @@ public void onRevokeCerts(CRLInfo crl) { public List getRevokedCertIds(X509CRL crl) { return Collections.unmodifiableList(crl.getRevokedCertificates().stream() - .map(cert->cert.getSerialNumber().longValue()) + .map(cert -> cert.getSerialNumber().longValue()) .collect(Collectors.toList())); } @@ -91,7 +91,7 @@ public void removePendingCrl(CRLInfo crl) { public List getPendingCrlIds() { return new ArrayList<>(pendingCrls) - .stream().map(crl->crl.getCrlSequenceID()) + .stream().map(crl -> crl.getCrlSequenceID()) .collect(Collectors.toList()); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java index 96e157711bb..8b96d5c0a98 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/update/client/SCMUpdateServiceGrpcClient.java @@ -92,7 +92,7 @@ public void start() { createChannel(); } clientId = subScribeClient(); - assert(clientId != null); + assert (clientId != null); // start background thread processing pending crl ids. 
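The `CRLClientUpdateHandler` hunk above only touches the interval log line, but the constructor around it wires a single-threaded daemon scheduler that re-checks pending CRL ids. A minimal sketch of that scheduling pattern follows, assuming a hypothetical `PendingCrlStore` callback where the diff does not show the real types.

```
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import com.google.common.util.concurrent.ThreadFactoryBuilder;

// Sketch of the periodic-check pattern used by CRLClientUpdateHandler.
// PendingCrlStore is a hypothetical stand-in for the client CRL store.
public class PendingCrlChecker {
  private final ScheduledExecutorService executor =
      Executors.newSingleThreadScheduledExecutor(
          new ThreadFactoryBuilder()
              .setDaemon(true)  // do not block JVM shutdown
              .setNameFormat("CRLUpdateHandler Thread - %d")
              .build());

  private final PendingCrlStore clientStore;
  private final long crlCheckIntervalMs;

  public PendingCrlChecker(PendingCrlStore clientStore, long crlCheckIntervalMs) {
    this.clientStore = clientStore;
    this.crlCheckIntervalMs = crlCheckIntervalMs;
  }

  public void start() {
    // Re-check pending CRL ids on a fixed interval, mirroring crlCheckInterval.
    executor.scheduleAtFixedRate(clientStore::processPendingCrls,
        crlCheckIntervalMs, crlCheckIntervalMs, TimeUnit.MILLISECONDS);
  }

  public void stop() {
    executor.shutdownNow();
  }

  /** Hypothetical callback; the real handler drives the gRPC client CRL store. */
  public interface PendingCrlStore {
    void processPendingCrls();
  }
}
```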
handler = new CRLClientUpdateHandler(clientId, updateClient, diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java index 3136168cc5a..6738868942b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/BaseApprover.java @@ -100,7 +100,7 @@ List getExtensionsList(Attribute attribute) { Objects.requireNonNull(attribute); List extensionsList = new ArrayList<>(); for (ASN1Encodable value : attribute.getAttributeValues()) { - if(value != null) { + if (value != null) { Extensions extensions = Extensions.getInstance(value); extensionsList.add(extensions); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java index fc2a77b02bd..83be3aaf3ba 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java @@ -228,7 +228,7 @@ public Future requestCertificate( CompletableFuture xcertHolder = approver.inspectCSR(csr); - if(xcertHolder.isCompletedExceptionally()) { + if (xcertHolder.isCompletedExceptionally()) { // This means that approver told us there are things which it disagrees // with in this Certificate Request. Since the first set of sanity // checks failed, we just return the future object right here. @@ -324,7 +324,7 @@ public Future> revokeCertificates( public List listCertificate(NodeType role, long startSerialId, int count, boolean isRevoked) throws IOException { return store.listCertificate(role, BigInteger.valueOf(startSerialId), count, - isRevoked? CertificateStore.CertType.REVOKED_CERTS : + isRevoked ? 
CertificateStore.CertType.REVOKED_CERTS : CertificateStore.CertType.VALID_CERTS); } @@ -554,7 +554,7 @@ private void generateRootCertificate(SecurityConfig securityConfig, OzoneSecurityUtil.getValidInetsForCurrentHost().forEach( ip -> { builder.addIpAddress(ip.getHostAddress()); - if(validator.isValid(ip.getCanonicalHostName())) { + if (validator.isValid(ip.getCanonicalHostName())) { builder.addDnsName(ip.getCanonicalHostName()); } }); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java index a146c738d1b..da799d7d45f 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/PKIProfiles/DefaultCAProfile.java @@ -53,7 +53,7 @@ private static boolean validateBasicExtensions(Extension ext, PKIProfile pkiProfile) { BasicConstraints constraints = BasicConstraints.getInstance(ext.getParsedValue()); - if(constraints.isCA()) { + if (constraints.isCA()) { if (pkiProfile.isCA()) { return true; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java index d831c834fdd..d681806c12d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java @@ -516,7 +516,7 @@ public CertificateSignRequest.Builder getCSRBuilder() OzoneSecurityUtil.getValidInetsForCurrentHost().forEach( ip -> { builder.addIpAddress(ip.getHostAddress()); - if(validator.isValid(ip.getCanonicalHostName())) { + if (validator.isValid(ip.getCanonicalHostName())) { builder.addDnsName(ip.getCanonicalHostName()); } else { getLogger().error("Invalid domain {}", ip.getCanonicalHostName()); @@ -580,7 +580,7 @@ public void storeCertificate(String pemEncodedCert, boolean force, String certName = String.format(CERT_FILE_NAME_FORMAT, cert.getSerialNumber().toString()); - if(caCert) { + if (caCert) { certName = CA_CERT_PREFIX + certName; caCertId = cert.getSerialNumber().toString(); } @@ -688,17 +688,17 @@ protected enum InitCase { @Override public synchronized InitResponse init() throws CertificateException { int initCase = 0; - PrivateKey pvtKey= getPrivateKey(); + PrivateKey pvtKey = getPrivateKey(); PublicKey pubKey = getPublicKey(); X509Certificate certificate = getCertificate(); - if(pvtKey != null){ - initCase = initCase | 1<<2; + if (pvtKey != null) { + initCase = initCase | 1 << 2; } - if(pubKey != null){ - initCase = initCase | 1<<1; + if (pubKey != null) { + initCase = initCase | 1 << 1; } - if(certificate != null){ + if (certificate != null) { initCase = initCase | 1; } getLogger().info("Certificate client init case: {}", initCase); @@ -800,7 +800,7 @@ protected boolean recoverPublicKey() throws CertificateException { PublicKey pubKey = getCertificate().getPublicKey(); try { - if(validateKeyPair(pubKey)){ + if (validateKeyPair(pubKey)) { keyCodec.writePublicKey(pubKey); publicKey = pubKey; } else { @@ -922,7 +922,7 @@ public List listCA() 
throws IOException { updateCAList(); } return pemEncodedCACerts; - }finally { + } finally { lock.unlock(); } } @@ -947,7 +947,7 @@ public List updateCAList() throws IOException { } @Override - public boolean processCrl(CRLInfo crl){ + public boolean processCrl(CRLInfo crl) { List certIds2Remove = new ArrayList(); crl.getX509CRL().getRevokedCertificates().forEach( cert -> certIds2Remove.add(cert.getSerialNumber().toString())); @@ -957,15 +957,15 @@ public boolean processCrl(CRLInfo crl){ } - private boolean removeCertificates(List certIds){ + private boolean removeCertificates(List certIds) { lock.lock(); boolean reInitCert = false; try { // For now, remove self cert and ca cert is not implemented // both requires a restart of the service. - if ((certSerialId!=null && certIds.contains(certSerialId)) || - (caCertId!=null && certIds.contains(caCertId)) || - (rootCaCertId!=null && certIds.contains(rootCaCertId))) { + if ((certSerialId != null && certIds.contains(certSerialId)) || + (caCertId != null && certIds.contains(caCertId)) || + (rootCaCertId != null && certIds.contains(rootCaCertId))) { reInitCert = true; } @@ -1004,7 +1004,7 @@ public long getLocalCrlId() { * Set Local CRL id. * @param crlId */ - public void setLocalCrlId(long crlId){ + public void setLocalCrlId(long crlId) { this.localCrlId = crlId; } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java index 7aea5967df2..6143bd1030b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/OMCertificateClient.java @@ -44,8 +44,8 @@ public class OMCertificateClient extends DefaultCertificateClient { public OMCertificateClient(SecurityConfig securityConfig, String certSerialId, String localCrlId) { super(securityConfig, LOG, certSerialId, COMPONENT_NAME); - this.setLocalCrlId(localCrlId!=null ? - Long.parseLong(localCrlId): 0); + this.setLocalCrlId(localCrlId != null ? 
+ Long.parseLong(localCrlId) : 0); } public OMCertificateClient(SecurityConfig securityConfig, diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java index b8d2859eed3..ec7b5a83f2a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java @@ -117,7 +117,7 @@ public static String getEncodedString(PKCS10CertificationRequest request) PemObject pemObject = new PemObject("CERTIFICATE REQUEST", request.getEncoded()); StringWriter str = new StringWriter(); - try(JcaPEMWriter pemWriter = new JcaPEMWriter(str)) { + try (JcaPEMWriter pemWriter = new JcaPEMWriter(str)) { pemWriter.writeObject(pemObject); } return str.toString(); @@ -135,7 +135,7 @@ public static PKCS10CertificationRequest getCertificationRequest(String csr) throws IOException { try (PemReader reader = new PemReader(new StringReader(csr))) { PemObject pemObject = reader.readPemObject(); - if(pemObject.getContent() == null) { + if (pemObject.getContent() == null) { throw new SCMSecurityException("Invalid Certificate signing request", INVALID_CSR); } @@ -268,10 +268,10 @@ public CertificateSignRequest.Builder setCA(Boolean isCA) { private Extension getKeyUsageExtension() throws IOException { int keyUsageFlag = KeyUsage.keyAgreement; - if(digitalEncryption){ + if (digitalEncryption) { keyUsageFlag |= KeyUsage.keyEncipherment | KeyUsage.dataEncipherment; } - if(digitalSignature) { + if (digitalSignature) { keyUsageFlag |= KeyUsage.digitalSignature; } @@ -303,7 +303,7 @@ private Extensions createExtensions() throws IOException { List extensions = new ArrayList<>(); // Add basic extension - if(ca) { + if (ca) { extensions.add(getBasicExtension()); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java index 5a9fba65b13..8aa512f6918 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java @@ -44,7 +44,7 @@ public class CRLInfo implements Comparator, private Instant revocationTime; private CRLInfo(X509CRL x509CRL, long creationTimestamp, long crlSequenceID) { - assert((x509CRL != null) && + assert ((x509CRL != null) && !x509CRL.getRevokedCertificates().isEmpty()); this.x509CRL = x509CRL; this.creationTimestamp = creationTimestamp; diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java index 3178cfdc3bb..2d53b8fb6fb 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfoCodec.java @@ -44,7 +44,7 @@ public CRLInfo fromPersistedFormat(byte[] rawData) throws IOException { try { return CRLInfo.fromProtobuf( HddsProtos.CRLInfoProto.PARSER.parseFrom(rawData)); - } catch (CertificateException|CRLException e) { + } catch (CertificateException | CRLException e) { throw new 
IllegalArgumentException( "Can't encode the the raw data from the byte array", e); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java index a3f1b6bc2c0..f4f188aaf39 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/ProfileServlet.java @@ -192,7 +192,7 @@ public Process runCmdAsync(List cmd) { protected static String generateFileName(Integer pid, Output output, Event event) { String outputFormat = output.name().toLowerCase(); - if(output == Output.FLAMEGRAPH) { + if (output == Output.FLAMEGRAPH) { outputFormat = "html"; } return FILE_PREFIX + pid + "-" + diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java index 87dc882a00f..3dc176644d9 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/DBCheckpointMetrics.java @@ -32,7 +32,7 @@ * This interface is for maintaining DB checkpoint statistics. */ @InterfaceAudience.Private -@Metrics(about="DB checkpoint Metrics", context="dfs") +@Metrics(about = "DB checkpoint Metrics", context = "dfs") public class DBCheckpointMetrics { private static final String SOURCE_NAME = DBCheckpointMetrics.class.getSimpleName(); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java index 78f8a80ae24..7f2deeb0935 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HAUtils.java @@ -404,7 +404,7 @@ public static List buildCAList(CertificateClient certClient, return getCAListWithRetry(() -> waitForCACerts( scmSecurityProtocolClient::listCACertificate, expectedCount), waitDuration); - } else{ + } else { return scmSecurityProtocolClient.listCACertificate(); } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java index 508320e850f..e3b91ba8ed0 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java @@ -82,7 +82,7 @@ public static class KeyPrefixFilter implements MetadataKeyFilter { private int keysScanned = 0; private int keysHinted = 0; - public KeyPrefixFilter() {} + public KeyPrefixFilter() { } /** * KeyPrefixFilter constructor. 
It is made of positive and negative prefix diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java index ec4c0e1a255..1d1bff1bbc6 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/TransactionInfo.java @@ -47,7 +47,7 @@ public final class TransactionInfo { private TransactionInfo(String transactionInfo) { String[] tInfo = transactionInfo.split(TRANSACTION_INFO_SPLIT_KEY); - Preconditions.checkState(tInfo.length==2, + Preconditions.checkState(tInfo.length == 2, "Incorrect TransactionInfo value"); term = Long.parseLong(tInfo[0]); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java index 50ac54f9211..c9bf38504f6 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBConfigFromFile.java @@ -124,8 +124,8 @@ public static DBOptions readFromFile(String dbFileName, Env env = Env.getDefault(); DBOptions options = null; File configLocation = getConfigLocation(); - if(configLocation != null && - StringUtil.isNotBlank(configLocation.toString())){ + if (configLocation != null && + StringUtil.isNotBlank(configLocation.toString())) { Path optionsFile = Paths.get(configLocation.toString(), getOptionsFileNameFromDB(dbFileName)); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java index f0096ed9d83..2ac2bdc7306 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java @@ -187,4 +187,14 @@ void move(KEY sourceKey, KEY destKey, VALUE value, */ DBUpdatesWrapper getUpdatesSince(long sequenceNumber) throws SequenceNumberNotFoundException; + + /** + * Get limited data written to DB since a specific sequence number. + * @param sequenceNumber + * @param limitCount + * @return + * @throws SequenceNumberNotFoundException + */ + DBUpdatesWrapper getUpdatesSince(long sequenceNumber, long limitCount) + throws SequenceNumberNotFoundException; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index ad48a19927a..8b07003c9ce 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -168,7 +168,7 @@ private void applyDBDefinition(DBDefinition definition) { * @return DBStore */ public DBStore build() throws IOException { - if(StringUtil.isBlank(dbname) || (dbPath == null)) { + if (StringUtil.isBlank(dbname) || (dbPath == null)) { LOG.error("Required Parameter missing."); throw new IOException("Required parameter is missing. 
Please make sure " + "Path and DB name is provided."); @@ -340,7 +340,7 @@ private DBOptions getDBOptionsFromFile(Collection tableConfigs) { try { option = DBConfigFromFile.readFromFile(dbname, columnFamilyDescriptors); - if(option != null) { + if (option != null) { LOG.info("Using RocksDB DBOptions from {}.ini file", dbname); } } catch (IOException ex) { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java index b50b46225e0..eb71ec17838 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java @@ -382,7 +382,15 @@ public CodecRegistry getCodecRegistry() { @Override public DBUpdatesWrapper getUpdatesSince(long sequenceNumber) throws SequenceNumberNotFoundException { + return getUpdatesSince(sequenceNumber, Long.MAX_VALUE); + } + @Override + public DBUpdatesWrapper getUpdatesSince(long sequenceNumber, long limitCount) + throws SequenceNumberNotFoundException { + if (limitCount <= 0) { + throw new IllegalArgumentException("Illegal count for getUpdatesSince."); + } DBUpdatesWrapper dbUpdatesWrapper = new DBUpdatesWrapper(); try { TransactionLogIterator transactionLogIterator = @@ -415,6 +423,9 @@ public DBUpdatesWrapper getUpdatesSince(long sequenceNumber) } dbUpdatesWrapper.addWriteBatch(result.writeBatch().data(), result.sequenceNumber()); + if (currSequenceNumber - sequenceNumber >= limitCount) { + break; + } transactionLogIterator.next(); } } catch (RocksDBException e) { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java index f92306ab43c..c7f6196a638 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java @@ -98,7 +98,7 @@ public TypedTable( if (cacheType == CacheType.FULL_CACHE) { cache = new FullTableCache<>(); //fill cache - try(TableIterator> tableIterator = + try (TableIterator> tableIterator = iterator()) { while (tableIterator.hasNext()) { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java index 7be2921b6a1..401d644bc84 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/CacheKey.java @@ -56,7 +56,7 @@ public int hashCode() { @Override public int compareTo(Object o) { - if(Objects.equals(key, ((CacheKey)o).key)) { + if (Objects.equals(key, ((CacheKey)o).key)) { return 0; } else { return key.toString().compareTo((((CacheKey) o).key).toString()); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java index d87e90d36d3..120a08bcee8 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/cache/EpochEntry.java @@ -63,7 +63,7 @@ public int hashCode() { @Override public int compareTo(Object o) { - if(this.epoch == ((EpochEntry)o).epoch) { + if (this.epoch == 
((EpochEntry)o).epoch) { return 0; } else if (this.epoch < ((EpochEntry)o).epoch) { return -1; diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java index 39bf0829079..a2b2e775c7c 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/MockCAStore.java @@ -92,7 +92,7 @@ public List listCertificate(NodeType role, } @Override - public void reinitialize(SCMMetadataStore metadataStore) {} + public void reinitialize(SCMMetadataStore metadataStore) { } @Override public List getCrls(List crlIds) throws IOException { diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java index 3d32a3312c7..d6df77fc307 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java @@ -138,7 +138,7 @@ public void testWriteCRL() throws IOException, OperatorCreationException { assertTrue(crlFile.exists()); try (BufferedReader reader = new BufferedReader(new InputStreamReader( - new FileInputStream(crlFile), UTF_8))){ + new FileInputStream(crlFile), UTF_8))) { // Verify contents of the file String header = reader.readLine(); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java index 5b1a1f032a3..1aab7a5de47 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestCertificateSignRequest.java @@ -279,8 +279,8 @@ private void verifyServiceId(Extensions extensions) { GeneralNames.fromExtensions( extensions, Extension.subjectAlternativeName); GeneralName[] names = gns.getNames(); - for(int i=0; i < names.length; i++) { - if(names[i].getTagNo() == GeneralName.otherName) { + for (int i = 0; i < names.length; i++) { + if (names[i].getTagNo() == GeneralName.otherName) { ASN1Encodable asn1Encodable = names[i].getName(); Iterator iterator = ((DLSequence) asn1Encodable).iterator(); while (iterator.hasNext()) { diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java index 1e3a8f4610a..776aa4af564 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificates/TestRootCertificate.java @@ -165,7 +165,7 @@ public void testCACert() OzoneSecurityUtil.getValidInetsForCurrentHost().forEach( ip -> { builder.addIpAddress(ip.getHostAddress()); - if(validator.isValid(ip.getCanonicalHostName())) { + if (validator.isValid(ip.getCanonicalHostName())) { 
builder.addDnsName(ip.getCanonicalHostName()); } }); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java index 9bad0f31070..2fef2b87369 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java @@ -79,7 +79,7 @@ public void testGenerateKeyWithSize() throws NoSuchProviderException, HDDSKeyGenerator keyGen = new HDDSKeyGenerator(config.getConfiguration()); KeyPair keyPair = keyGen.generateKey(4096); PublicKey publicKey = keyPair.getPublic(); - if(publicKey instanceof RSAPublicKey) { + if (publicKey instanceof RSAPublicKey) { Assert.assertEquals(4096, ((RSAPublicKey)(publicKey)).getModulus().bitLength()); } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java index 99fcbae8045..e78bcb00855 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBStoreBuilder.java @@ -69,7 +69,7 @@ public void builderWithOneParamV1() throws IOException { public void builderWithOneParamV2() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); File newFolder = folder.newFolder(); - if(!newFolder.exists()) { + if (!newFolder.exists()) { Assert.assertTrue(newFolder.mkdirs()); } thrown.expect(IOException.class); @@ -82,7 +82,7 @@ public void builderWithOneParamV2() throws IOException { public void builderWithOpenClose() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); File newFolder = folder.newFolder(); - if(!newFolder.exists()) { + if (!newFolder.exists()) { Assert.assertTrue(newFolder.mkdirs()); } DBStore dbStore = DBStoreBuilder.newBuilder(conf) @@ -97,7 +97,7 @@ public void builderWithOpenClose() throws Exception { public void builderWithDoubleTableName() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); File newFolder = folder.newFolder(); - if(!newFolder.exists()) { + if (!newFolder.exists()) { Assert.assertTrue(newFolder.mkdirs()); } // Registering a new table with the same name should replace the previous @@ -127,7 +127,7 @@ public void builderWithDoubleTableName() throws Exception { public void builderWithDataWrites() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); File newFolder = folder.newFolder(); - if(!newFolder.exists()) { + if (!newFolder.exists()) { Assert.assertTrue(newFolder.mkdirs()); } try (DBStore dbStore = DBStoreBuilder.newBuilder(conf) @@ -156,7 +156,7 @@ public void builderWithDataWrites() throws Exception { public void builderWithDiskProfileWrites() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); File newFolder = folder.newFolder(); - if(!newFolder.exists()) { + if (!newFolder.exists()) { Assert.assertTrue(newFolder.mkdirs()); } try (DBStore dbStore = DBStoreBuilder.newBuilder(conf) diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java index 34d348f416d..ed8744ceba8 100644 --- 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java @@ -74,7 +74,7 @@ public void setUp() throws Exception { statistics.setStatsLevel(StatsLevel.ALL); options = options.setStatistics(statistics); configSet = new HashSet<>(); - for(String name : families) { + for (String name : families) { TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions()); configSet.add(newConfig); } @@ -288,44 +288,32 @@ public void testRocksDBCheckpointCleanup() throws Exception { } } - /** - * Not strictly a unit test. Just a confirmation of the expected behavior - * of RocksDB keyMayExist API. - * Expected behavior - On average, keyMayExist latency < key.get() latency - * for invalid keys. - * @throws Exception if unable to read from RocksDB. - */ @Test - public void testRocksDBKeyMayExistApi() throws Exception { + public void testGetDBUpdatesSince() throws Exception { + try (RDBStore newStore = new RDBStore(folder.newFolder(), options, configSet)) { - RocksDB db = newStore.getDb(); - //Test with 50 invalid keys. - long start = System.nanoTime(); - for (int i = 0; i < 50; i++) { - Assert.assertTrue(db.get( + try (Table firstTable = newStore.getTable(families.get(1))) { + firstTable.put( + org.apache.commons.codec.binary.StringUtils.getBytesUtf16("Key1"), org.apache.commons.codec.binary.StringUtils - .getBytesUtf16("key" + i)) == null); - } - long end = System.nanoTime(); - long keyGetLatency = end - start; - - start = System.nanoTime(); - for (int i = 0; i < 50; i++) { - Assert.assertFalse(db.keyMayExist( + .getBytesUtf16("Value1")); + firstTable.put( + org.apache.commons.codec.binary.StringUtils.getBytesUtf16("Key2"), org.apache.commons.codec.binary.StringUtils - .getBytesUtf16("key" + i), null)); + .getBytesUtf16("Value2")); } - end = System.nanoTime(); - long keyMayExistLatency = end - start; + Assert.assertTrue( + newStore.getDb().getLatestSequenceNumber() == 2); - Assert.assertTrue(keyMayExistLatency < keyGetLatency); + DBUpdatesWrapper dbUpdatesSince = newStore.getUpdatesSince(0); + Assert.assertEquals(2, dbUpdatesSince.getData().size()); } } @Test - public void testGetDBUpdatesSince() throws Exception { + public void testGetDBUpdatesSinceWithLimitCount() throws Exception { try (RDBStore newStore = new RDBStore(folder.newFolder(), options, configSet)) { @@ -343,8 +331,8 @@ public void testGetDBUpdatesSince() throws Exception { Assert.assertTrue( newStore.getDb().getLatestSequenceNumber() == 2); - DBUpdatesWrapper dbUpdatesSince = newStore.getUpdatesSince(0); - Assert.assertEquals(2, dbUpdatesSince.getData().size()); + DBUpdatesWrapper dbUpdatesSince = newStore.getUpdatesSince(0, 1); + Assert.assertEquals(1, dbUpdatesSince.getData().size()); } } @@ -370,7 +358,7 @@ public void testDowngrade() throws Exception { options.setCreateMissingColumnFamilies(true); configSet = new HashSet<>(); List familiesMinusOne = families.subList(0, families.size() - 1); - for(String name : familiesMinusOne) { + for (String name : familiesMinusOne) { TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions()); configSet.add(newConfig); } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java index fea40bbf30f..b49556df9f0 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java +++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStoreIterator.java @@ -92,7 +92,7 @@ public void testForeachRemainingCallsConsumerWithAllElements() { } @Test - public void testHasNextDependsOnIsvalid(){ + public void testHasNextDependsOnIsvalid() { when(rocksDBIteratorMock.isValid()).thenReturn(true, true, false); RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); @@ -169,7 +169,7 @@ public void testGettingTheKeyIfIteratorIsValid() { RDBStoreIterator iter = new RDBStoreIterator(rocksDBIteratorMock); byte[] key = null; - if(iter.hasNext()) { + if (iter.hasNext()) { ByteArrayKeyValue entry = iter.next(); key = entry.getKey(); } @@ -191,7 +191,7 @@ public void testGettingTheValueIfIteratorIsValid() { ByteArrayKeyValue entry; byte[] key = null; byte[] value = null; - if(iter.hasNext()) { + if (iter.hasNext()) { entry = iter.next(); key = entry.getKey(); value = entry.getValue(); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java index 5d007630e54..0f1858b902d 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java @@ -63,7 +63,7 @@ private static boolean consume(Table.KeyValue keyValue) { count++; try { Assert.assertNotNull(keyValue.getKey()); - } catch(IOException ex) { + } catch (IOException ex) { Assert.fail("Unexpected Exception " + ex.toString()); } return true; @@ -80,7 +80,7 @@ public void setUp() throws Exception { options = options.setStatistics(statistics); Set configSet = new HashSet<>(); - for(String name : families) { + for (String name : families) { TableConfig newConfig = new TableConfig(name, new ColumnFamilyOptions()); configSet.add(newConfig); } diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java index 073027f2639..837ea27e541 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java @@ -296,7 +296,7 @@ public void testTypedTableWithCacheWithFewDeletedOperationType() } ArrayList epochs = new ArrayList<>(); - for (long i=0; i<=5L; i++) { + for (long i = 0; i <= 5L; i++) { epochs.add(i); } testTable.cleanupCache(epochs); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java index a1cc7ddec5c..860a695cda4 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/cache/TestTableCache.java @@ -74,13 +74,13 @@ public void create() { public void testPartialTableCache() { - for (int i = 0; i< 10; i++) { + for (int i = 0; i < 10; i++) { tableCache.put(new CacheKey<>(Integer.toString(i)), new CacheValue<>(Optional.of(Integer.toString(i)), i)); } - for (int i=0; i < 10; i++) { + for (int i = 0; i < 10; i++) { Assert.assertEquals(Integer.toString(i), tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); } @@ -94,7 +94,7 @@ public void testPartialTableCache() { // On a full 
table cache if some one calls cleanup it is a no-op. tableCache.evictCache(epochs); - for (int i=5; i < 10; i++) { + for (int i = 5; i < 10; i++) { Assert.assertEquals(Integer.toString(i), tableCache.get(new CacheKey<>(Integer.toString(i))).getCacheValue()); } @@ -109,7 +109,7 @@ public void testPartialTableCacheWithNotContinousEntries() throws Exception { int cleanupCount = 0; ArrayList epochs = new ArrayList(); - for (long i=0; i(Integer.toString(i))).getCacheValue()); } @@ -357,13 +357,13 @@ public void testPartialTableCacheParallel() throws Exception { final int tc = totalCount; Assert.assertEquals(tc - deleted, tableCache.size()); // Check if we have remaining entries. - for (int i=6; i <= totalCount; i++) { + for (int i = 6; i <= totalCount; i++) { Assert.assertEquals(Integer.toString(i), tableCache.get( new CacheKey<>(Integer.toString(i))).getCacheValue()); } epochs = new ArrayList<>(); - for (long i=6; i<= totalCount; i++) { + for (long i = 6; i <= totalCount; i++) { epochs.add(i); } @@ -373,7 +373,7 @@ public void testPartialTableCacheParallel() throws Exception { Assert.assertEquals(0, tableCache.size()); } else { ArrayList epochs = new ArrayList<>(); - for (long i=0; i<= totalCount; i++) { + for (long i = 0; i <= totalCount; i++) { epochs.add(i); } tableCache.evictCache(epochs); @@ -453,7 +453,7 @@ public void testTableCacheWithNonConsecutiveEpochList() { tableCache.evictCache(epochs); - if(cacheType == TableCache.CacheType.PARTIAL_CACHE) { + if (cacheType == TableCache.CacheType.PARTIAL_CACHE) { Assert.assertTrue(tableCache.size() == 0); Assert.assertTrue(tableCache.getEpochEntrySet().size() == 0); } else { @@ -475,7 +475,7 @@ public void testTableCacheWithNonConsecutiveEpochList() { private int writeToCache(int count, int startVal, long sleep) throws InterruptedException { int counter = 1; - while (counter <= count){ + while (counter <= count) { tableCache.put(new CacheKey<>(Integer.toString(startVal)), new CacheValue<>(Optional.of(Integer.toString(startVal)), startVal)); startVal++; diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index 4830c11b9be..e2d7b1663eb 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -74,6 +74,9 @@ message ScmContainerLocationRequest { optional ContainerBalancerStatusRequestProto containerBalancerStatusRequest = 35; optional FinalizeScmUpgradeRequestProto finalizeScmUpgradeRequest = 36; optional QueryUpgradeFinalizationProgressRequestProto queryUpgradeFinalizationProgressRequest = 37; + optional GetContainerCountRequestProto getContainerCountRequest = 38; + optional GetContainerReplicasRequestProto getContainerReplicasRequest = 39; + optional ReplicationManagerReportRequestProto replicationManagerReportRequest = 40; } message ScmContainerLocationResponse { @@ -119,6 +122,9 @@ message ScmContainerLocationResponse { optional ContainerBalancerStatusResponseProto containerBalancerStatusResponse = 35; optional FinalizeScmUpgradeResponseProto finalizeScmUpgradeResponse = 36; optional QueryUpgradeFinalizationProgressResponseProto queryUpgradeFinalizationProgressResponse = 37; + optional GetContainerCountResponseProto getContainerCountResponse = 38; + optional GetContainerReplicasResponseProto getContainerReplicasResponse = 39; + optional ReplicationManagerReportResponseProto getReplicationManagerReportResponse = 40; enum Status { OK = 1; @@ -162,6 +168,9 @@ 
enum Type { GetContainerBalancerStatus = 30; FinalizeScmUpgrade = 31; QueryUpgradeFinalizationProgress = 32; + GetContainerCount = 33; + GetContainerReplicas = 34; + GetReplicationManagerReport = 35; } /** @@ -210,6 +219,15 @@ message GetContainerWithPipelineResponseProto { required ContainerWithPipeline containerWithPipeline = 1; } +message GetContainerReplicasRequestProto { + required int64 containerID = 1; + optional string traceID = 2; +} + +message GetContainerReplicasResponseProto { + repeated SCMContainerReplicaProto containerReplica = 1; +} + message GetContainerWithPipelineBatchRequestProto { repeated int64 containerIDs = 1; optional string traceID = 2; @@ -383,6 +401,13 @@ message GetPipelineResponseProto { required Pipeline pipeline = 1; } +message GetContainerCountRequestProto { +} + +message GetContainerCountResponseProto { + required int64 containerCount = 1; +} + message ActivatePipelineRequestProto { required PipelineID pipelineID = 1; optional string traceID = 2; @@ -446,6 +471,14 @@ message ReplicationManagerStatusResponseProto { required bool isRunning = 1; } +message ReplicationManagerReportRequestProto { + optional string traceID = 1; +} + +message ReplicationManagerReportResponseProto { + required ReplicationManagerReportProto report = 1; +} + message FinalizeScmUpgradeRequestProto { required string upgradeClientId = 1; } @@ -479,11 +512,14 @@ message GetContainerTokenResponseProto { message StartContainerBalancerRequestProto { optional string traceID = 1; optional double threshold = 2; - optional int32 idleiterations = 3; - optional double maxDatanodesRatioToInvolvePerIteration = 4; + optional int32 idleiterations = 3 [deprecated = true]; + optional double maxDatanodesRatioToInvolvePerIteration = 4 [deprecated = + true]; optional int64 maxSizeToMovePerIterationInGB = 5; optional int64 maxSizeEnteringTargetInGB = 6; optional int64 maxSizeLeavingSourceInGB = 7; + optional int32 maxDatanodesPercentageToInvolvePerIteration = 8; + optional int32 iterations = 9; } message StartContainerBalancerResponseProto { diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index 133f4c694fd..b55531e8a79 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -379,3 +379,28 @@ message ContainerReplicaHistoryProto { required int64 bcsId = 4; } +message SCMContainerReplicaProto { + required int64 containerID = 1; + required string state = 2; + required DatanodeDetailsProto datanodeDetails = 3; + required string placeOfBirth = 4; + required int64 sequenceID = 5; + required int64 keyCount = 6; + required int64 bytesUsed = 7; +} + +message KeyContainerIDList { + required string key = 1; + repeated ContainerID container = 2; +} + +message KeyIntValue { + required string key = 1; + optional int64 value = 2; +} + +message ReplicationManagerReportProto { + required int64 timestamp = 1; + repeated KeyIntValue stat = 2; + repeated KeyContainerIDList statSample = 3; +} diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index d7a5e720b5d..0b5071a8585 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -106,16 +106,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> assertj-core test
- - org.openjdk.jmh - jmh-core - test - - - org.openjdk.jmh - jmh-generator-annprocess - test - org.mockito mockito-core diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index 3c5fdc0f2ee..78a87b88201 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -442,7 +442,7 @@ public void onMessage( commitTransactions(ackProto.getResultsList(), UUID.fromString(ackProto.getDnId())); metrics.incrBlockDeletionCommandSuccess(); - } else if (status == CommandStatus.Status.FAILED){ + } else if (status == CommandStatus.Status.FAILED) { metrics.incrBlockDeletionCommandFailure(); } else { LOG.error("Delete Block Command is not executed yet."); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java index 26368b46e46..3dab4ad83f7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java @@ -100,15 +100,15 @@ public Set getReplica() { @Override public String toString() { - return "Container State: " +container.getState()+ - " Replica Count: "+replica.size()+ - " Healthy Count: "+healthyCount+ - " Decommission Count: "+decommissionCount+ - " Maintenance Count: "+maintenanceCount+ - " inFlightAdd Count: "+inFlightAdd+ - " inFightDel Count: "+inFlightDel+ - " ReplicationFactor: "+repFactor+ - " minMaintenance Count: "+minHealthyForMaintenance; + return "Container State: " + container.getState() + + " Replica Count: " + replica.size() + + " Healthy Count: " + healthyCount + + " Decommission Count: " + decommissionCount + + " Maintenance Count: " + maintenanceCount + + " inFlightAdd Count: " + inFlightAdd + + " inFightDel Count: " + inFlightDel + + " ReplicationFactor: " + repFactor + + " minMaintenance Count: " + minHealthyForMaintenance; } /** @@ -269,4 +269,14 @@ public boolean isHealthy() { .allMatch(r -> ReplicationManager.compareState( container.getState(), r.getState())); } + + /** + * Returns true if there are no replicas of a container available, i.e. the + * set of container replica passed in the constructor has zero entries. + * + * @return true if there are no replicas, false otherwise.
+ */ + public boolean isMissing() { + return replica.size() == 0; + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java index 32804d7a8d2..8a50884321b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java @@ -164,7 +164,7 @@ private void processContainerReplicas(final DatanodeDetails datanodeDetails, try { processContainerReplica(datanodeDetails, replicaProto, publisher); } catch (ContainerNotFoundException e) { - if(unknownContainerHandleAction.equals( + if (unknownContainerHandleAction.equals( UNKNOWN_CONTAINER_ACTION_WARN)) { LOG.error("Received container report for an unknown container" + " {} from datanode {}.", replicaProto.getContainerID(), diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java index 47842c0ad59..59e73a36514 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java @@ -87,6 +87,7 @@ import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.hdds.utils.db.Table; import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState; import com.google.protobuf.GeneratedMessage; import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; @@ -254,6 +255,11 @@ public enum MoveResult { */ private final MoveScheduler moveScheduler; + /** + * Report object that is refreshed each time replication Manager runs. + */ + private ReplicationManagerReport containerReport; + /** * Constructs ReplicationManager instance with the given configuration. * @@ -286,6 +292,7 @@ public ReplicationManager(final ConfigurationSource conf, this.inflightMoveFuture = new ConcurrentHashMap<>(); this.minHealthyForMaintenance = rmConf.getMaintenanceReplicaMinimum(); this.clock = clock; + this.containerReport = new ReplicationManagerReport(); this.waitTimeInMillis = conf.getTimeDuration( HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, @@ -363,16 +370,29 @@ public synchronized void stop() { * This in intended to be used in tests. */ public synchronized void processAll() { + if (!shouldRun()) { + LOG.info("Replication Manager is not ready to run until {}ms after " + + "safemode exit", waitTimeInMillis); + return; + } final long start = clock.millis(); final List containers = containerManager.getContainers(); - containers.forEach(this::processContainer); - + ReplicationManagerReport report = new ReplicationManagerReport(); + for (ContainerInfo c : containers) { + processContainer(c, report); + } + report.setComplete(); + containerReport = report; LOG.info("Replication Monitor Thread took {} milliseconds for" + " processing {} containers.", clock.millis() - start, containers.size()); } + public ReplicationManagerReport getContainerReport() { + return containerReport; + } + /** * ReplicationMonitor thread runnable. This wakes up at configured * interval and processes all the containers in the system. 
@@ -398,7 +418,9 @@ private synchronized void run() { * * @param container ContainerInfo */ - private void processContainer(ContainerInfo container) { + @SuppressWarnings("checkstyle:methodlength") + private void processContainer(ContainerInfo container, + ReplicationManagerReport report) { if (!shouldRun()) { return; } @@ -410,6 +432,7 @@ private void processContainer(ContainerInfo container) { final Set replicas = containerManager .getContainerReplicas(id); final LifeCycleState state = container.getState(); + report.increment(state); /* * We don't take any action if the container is in OPEN state and @@ -418,6 +441,8 @@ private void processContainer(ContainerInfo container) { */ if (state == LifeCycleState.OPEN) { if (!isOpenContainerHealthy(container, replicas)) { + report.incrementAndSample( + HealthState.OPEN_UNHEALTHY, container.containerID()); eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, id); } return; @@ -442,10 +467,14 @@ private void processContainer(ContainerInfo container) { * If the container is in QUASI_CLOSED state, check and close the * container if possible. */ - if (state == LifeCycleState.QUASI_CLOSED && - canForceCloseContainer(container, replicas)) { - forceCloseContainer(container, replicas); - return; + if (state == LifeCycleState.QUASI_CLOSED) { + if (canForceCloseContainer(container, replicas)) { + forceCloseContainer(container, replicas); + return; + } else { + report.incrementAndSample(HealthState.QUASI_CLOSED_STUCK, + container.containerID()); + } } /* @@ -458,7 +487,7 @@ private void processContainer(ContainerInfo container) { updateInflightAction(container, inflightReplication, action -> replicas.stream() .anyMatch(r -> r.getDatanodeDetails().equals(action.datanode)), - ()-> metrics.incrNumReplicationCmdsTimeout(), + () -> metrics.incrNumReplicationCmdsTimeout(), action -> updateCompletedReplicationMetrics(container, action)); updateInflightAction(container, inflightDeletion, @@ -498,6 +527,8 @@ private void processContainer(ContainerInfo container) { * exact number of replicas in the same state. */ if (isContainerEmpty(container, replicas)) { + report.incrementAndSample( + HealthState.EMPTY, container.containerID()); /* * If container is empty, schedule task to delete the container. */ @@ -509,8 +540,22 @@ private void processContainer(ContainerInfo container) { * Check if the container is under replicated and take appropriate * action. */ - if (!replicaSet.isSufficientlyReplicated() - || !placementStatus.isPolicySatisfied()) { + boolean sufficientlyReplicated = replicaSet.isSufficientlyReplicated(); + boolean placementSatisfied = placementStatus.isPolicySatisfied(); + if (!sufficientlyReplicated || !placementSatisfied) { + if (!sufficientlyReplicated) { + report.incrementAndSample( + HealthState.UNDER_REPLICATED, container.containerID()); + if (replicaSet.isMissing()) { + report.incrementAndSample(HealthState.MISSING, + container.containerID()); + } + } + if (!placementSatisfied) { + report.incrementAndSample(HealthState.MIS_REPLICATED, + container.containerID()); + + } handleUnderReplicatedContainer(container, replicaSet, placementStatus); return; @@ -521,6 +566,8 @@ private void processContainer(ContainerInfo container) { * action. */ if (replicaSet.isOverReplicated()) { + report.incrementAndSample(HealthState.OVER_REPLICATED, + container.containerID()); handleOverReplicatedContainer(container, replicaSet); return; } @@ -531,6 +578,8 @@ private void processContainer(ContainerInfo container) { are not in the same state as the container itself. 
*/ if (!replicaSet.isHealthy()) { + report.incrementAndSample(HealthState.UNHEALTHY, + container.containerID()); handleUnstableContainer(container, replicas); } } @@ -575,7 +624,7 @@ private void updateInflightAction(final ContainerInfo container, final List actions = inflightActions.get(id); Iterator iter = actions.iterator(); - while(iter.hasNext()) { + while (iter.hasNext()) { try { InflightAction a = iter.next(); NodeStatus status = nodeManager.getNodeStatus(a.datanode); @@ -870,7 +919,7 @@ public CompletableFuture move(ContainerID cid, */ private boolean isPolicySatisfiedAfterMove(ContainerInfo cif, DatanodeDetails srcDn, DatanodeDetails targetDn, - final List replicas){ + final List replicas) { Set movedReplicas = replicas.stream().collect(Collectors.toSet()); movedReplicas.removeIf(r -> r.getDatanodeDetails().equals(srcDn)); @@ -1108,7 +1157,7 @@ private void handleUnderReplicatedContainer(final ContainerInfo container, if (replicaSet.isSufficientlyReplicated() && placementStatus.isPolicySatisfied()) { - LOG.info("The container {} with replicas {} is sufficiently "+ + LOG.info("The container {} with replicas {} is sufficiently " + "replicated and is not mis-replicated", container.getContainerID(), replicaSet); return; @@ -1299,8 +1348,8 @@ private void deleteSrcDnForMove(final ContainerInfo cif, ContainerReplicaCount replicaCount = getContainerReplicaCount(cif, replicaSet); - if(!replicaSet.stream() - .anyMatch(r -> r.getDatanodeDetails().equals(srcDn))){ + if (!replicaSet.stream() + .anyMatch(r -> r.getDatanodeDetails().equals(srcDn))) { // if the target is present but source disappears somehow, // we can consider move is successful. compleleteMoveFutureWithResult(cid, MoveResult.COMPLETED); @@ -1605,7 +1654,7 @@ private NodeStatus getNodeStatus(DatanodeDetails dn) { try { return nodeManager.getNodeStatus(dn); } catch (NodeNotFoundException e) { - throw new IllegalStateException("Unable to find NodeStatus for "+dn, e); + throw new IllegalStateException("Unable to find NodeStatus for " + dn, e); } } @@ -1895,7 +1944,7 @@ public void startMove(HddsProtos.ContainerID contianerIDProto, try { cid = ContainerID.getFromProtobuf(contianerIDProto); mp = MoveDataNodePair.getFromProtobuf(mdnpp); - if(!inflightMove.containsKey(cid)) { + if (!inflightMove.containsKey(cid)) { transactionBuffer.addToBuffer(moveTable, cid, mp); inflightMove.putIfAbsent(cid, mp); } @@ -2006,8 +2055,8 @@ private void onLeaderReadyAndOutOfSafeMode() { boolean isTgtExist = replicas.stream() .anyMatch(r -> r.getDatanodeDetails().equals(v.getTgt())); - if(isSrcExist) { - if(isTgtExist) { + if (isSrcExist) { + if (isTgtExist) { //the former scm leader may or may not send the deletion command //before reelection.here, we just try to send the command again. deleteSrcDnForMove(cif, replicas); @@ -2032,8 +2081,8 @@ private void onLeaderReadyAndOutOfSafeMode() { * complete the CompletableFuture of the container in the given Map with * a given MoveResult. 
*/ - private void compleleteMoveFutureWithResult(ContainerID cid, MoveResult mr){ - if(inflightMoveFuture.containsKey(cid)) { + private void compleleteMoveFutureWithResult(ContainerID cid, MoveResult mr) { + if (inflightMoveFuture.containsKey(cid)) { inflightMoveFuture.get(cid).complete(mr); inflightMoveFuture.remove(cid); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java index a975f04cfc0..018f0dfd252 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/AbstractFindTargetGreedy.java @@ -70,7 +70,7 @@ protected void setPotentialTargets(Collection pt) { potentialTargets = pt; } - private void setUpperLimit(Double upperLimit){ + private void setUpperLimit(Double upperLimit) { this.upperLimit = upperLimit; } @@ -199,12 +199,12 @@ private boolean canSizeEnterTarget(DatanodeDetails target, long size) { */ @Override public void increaseSizeEntering(DatanodeDetails target, long size) { - if(sizeEnteringNode.containsKey(target)) { + if (sizeEnteringNode.containsKey(target)) { long totalEnteringSize = sizeEnteringNode.get(target) + size; sizeEnteringNode.put(target, totalEnteringSize); potentialTargets.removeIf( c -> c.getDatanodeDetails().equals(target)); - if(totalEnteringSize < config.getMaxSizeEnteringTarget()) { + if (totalEnteringSize < config.getMaxSizeEnteringTarget()) { //reorder potentialTargets.add(nodeManager.getUsageInfo(target)); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java index d7d3b6617fb..995a5da111b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancer.java @@ -74,7 +74,7 @@ public class ContainerBalancer { private long maxSizeToMovePerIteration; private int countDatanodesInvolvedPerIteration; private long sizeMovedPerIteration; - private int idleIteration; + private int iterations; private List unBalancedNodes; private List overUtilizedNodes; private List underUtilizedNodes; @@ -155,9 +155,12 @@ public boolean start(ContainerBalancerConfiguration balancerConfiguration) { return false; } - balancerRunning = true; this.config = balancerConfiguration; - validateConfiguration(config); + if (!validateConfiguration(config)) { + return false; + } + ozoneConfiguration.setFromObject(balancerConfiguration); + balancerRunning = true; LOG.info("Starting Container Balancer...{}", this); //we should start a new balancer thread async @@ -176,23 +179,12 @@ public boolean start(ContainerBalancerConfiguration balancerConfiguration) { * Balances the cluster. 
*/ private void balance() { - this.idleIteration = config.getIdleIteration(); - if(this.idleIteration == -1) { + this.iterations = config.getIterations(); + if (this.iterations == -1) { //run balancer infinitely - this.idleIteration = Integer.MAX_VALUE; - } - this.threshold = config.getThreshold(); - this.maxDatanodesRatioToInvolvePerIteration = - config.getMaxDatanodesRatioToInvolvePerIteration(); - this.maxSizeToMovePerIteration = config.getMaxSizeToMovePerIteration(); - if (config.getNetworkTopologyEnable()) { - findTargetStrategy = new FindTargetGreedyByNetworkTopology( - containerManager, placementPolicy, nodeManager, networkTopology); - } else { - findTargetStrategy = new FindTargetGreedyByUsageInfo(containerManager, - placementPolicy, nodeManager); + this.iterations = Integer.MAX_VALUE; } - for (int i = 0; i < idleIteration && balancerRunning; i++) { + for (int i = 0; i < iterations && balancerRunning; i++) { // stop balancing if iteration is not initialized if (!initializeIteration()) { stop(); @@ -215,7 +207,7 @@ private void balance() { // wait for configured time before starting next iteration, unless // this was the final iteration - if (i != idleIteration - 1) { + if (i != iterations - 1) { synchronized (this) { try { wait(config.getBalancingInterval().toMillis()); @@ -257,6 +249,17 @@ private boolean initializeIteration() { } return false; } + this.threshold = config.getThresholdAsRatio(); + this.maxDatanodesRatioToInvolvePerIteration = + config.getMaxDatanodesRatioToInvolvePerIteration(); + this.maxSizeToMovePerIteration = config.getMaxSizeToMovePerIteration(); + if (config.getNetworkTopologyEnable()) { + findTargetStrategy = new FindTargetGreedyByNetworkTopology( + containerManager, placementPolicy, nodeManager, networkTopology); + } else { + findTargetStrategy = new FindTargetGreedyByUsageInfo(containerManager, + placementPolicy, nodeManager); + } this.excludeNodes = config.getExcludeNodes(); this.includeNodes = config.getIncludeNodes(); // include/exclude nodes from balancing according to configs @@ -520,7 +523,7 @@ private ContainerMoveSelection matchSourceWithTarget(DatanodeDetails source) { } /** - * Checks if limits maxDatanodesRatioToInvolvePerIteration and + * Checks if limits maxDatanodesPercentageToInvolvePerIteration and * maxSizeToMovePerIteration have not been hit. 
* * @return {@link IterationResult#MAX_DATANODES_TO_INVOLVE_REACHED} if reached @@ -766,7 +769,7 @@ public void stop() { LOG.info("Container Balancer stopped successfully."); } - private void validateConfiguration(ContainerBalancerConfiguration conf) { + private boolean validateConfiguration(ContainerBalancerConfiguration conf) { // maxSizeEnteringTarget and maxSizeLeavingSource should by default be // greater than container size long size = (long) ozoneConfiguration.getStorageSize( @@ -776,10 +779,12 @@ private void validateConfiguration(ContainerBalancerConfiguration conf) { if (conf.getMaxSizeEnteringTarget() <= size) { LOG.info("MaxSizeEnteringTarget should be larger than " + "ozone.scm.container.size"); + return false; } if (conf.getMaxSizeLeavingSource() <= size) { LOG.info("MaxSizeLeavingSource should be larger than " + "ozone.scm.container.size"); + return false; } // balancing interval should be greater than DUFactory refresh period @@ -788,7 +793,9 @@ private void validateConfiguration(ContainerBalancerConfiguration conf) { if (conf.getBalancingInterval().toMillis() <= balancingInterval) { LOG.info("balancing.iteration.interval should be larger than " + "hdds.datanode.du.refresh.period."); + return false; } + return true; } public void setNodeManager(NodeManager nodeManager) { @@ -866,7 +873,7 @@ int getCountDatanodesInvolvedPerIteration() { return countDatanodesInvolvedPerIteration; } - long getSizeMovedPerIteration() { + public long getSizeMovedPerIteration() { return sizeMovedPerIteration; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java index a51e0bd567d..11a8a98dbeb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/ContainerBalancerConfiguration.java @@ -43,27 +43,25 @@ public final class ContainerBalancerConfiguration { LoggerFactory.getLogger(ContainerBalancerConfiguration.class); @Config(key = "utilization.threshold", type = ConfigType.AUTO, defaultValue = - "0.1", tags = {ConfigTag.BALANCER}, - description = "Threshold is a fraction in the range of 0 to 1. A " + + "10", tags = {ConfigTag.BALANCER}, + description = "Threshold is a percentage in the range of 0 to 100. 
A " + "cluster is considered balanced if for each datanode, the " + "utilization of the datanode (used space to capacity ratio) differs" + " from the utilization of the cluster (used space to capacity ratio" + - " of the entire cluster) no more than the threshold value.") - private String threshold = "0.1"; + " of the entire cluster) no more than the threshold.") + private String threshold = "10"; - @Config(key = "datanodes.involved.max.ratio.per.iteration", type = - ConfigType.AUTO, - defaultValue = "0.2", tags = {ConfigTag.BALANCER}, description = "The " + - "ratio of maximum number of datanodes that should be involved in " + - "balancing in one iteration to the total number of healthy, in service " + - "nodes known to container balancer.") - private String maxDatanodesRatioToInvolvePerIteration = "0.2"; + @Config(key = "datanodes.involved.max.percentage.per.iteration", type = + ConfigType.INT, defaultValue = "20", tags = {ConfigTag.BALANCER}, + description = "Maximum percentage of healthy, in service datanodes " + + "that can be involved in balancing in one iteration.") + private int maxDatanodesPercentageToInvolvePerIteration = 20; @Config(key = "size.moved.max.per.iteration", type = ConfigType.SIZE, - defaultValue = "30GB", tags = {ConfigTag.BALANCER}, + defaultValue = "500GB", tags = {ConfigTag.BALANCER}, description = "The maximum size of data in bytes that will be moved " + "by Container Balancer in one iteration.") - private long maxSizeToMovePerIteration = 30 * OzoneConsts.GB; + private long maxSizeToMovePerIteration = 500 * OzoneConsts.GB; @Config(key = "size.entering.target.max", type = ConfigType.SIZE, defaultValue = "26GB", tags = {ConfigTag.BALANCER}, description = "The " + @@ -81,10 +79,11 @@ public final class ContainerBalancerConfiguration { " (or default) ozone.scm.container.size.") private long maxSizeLeavingSource; - @Config(key = "idle.iterations", type = ConfigType.INT, + @Config(key = "iterations", type = ConfigType.INT, defaultValue = "10", tags = {ConfigTag.BALANCER}, - description = "The idle iteration count of Container Balancer.") - private int idleIterations = 10; + description = "The number of iterations that Container Balancer will " + + "run for.") + private int iterations = 10; @Config(key = "exclude.containers", type = ConfigType.STRING, defaultValue = "", tags = {ConfigTag.BALANCER}, description = "List of container IDs " + @@ -93,7 +92,7 @@ public final class ContainerBalancerConfiguration { @Config(key = "move.timeout", type = ConfigType.TIME, defaultValue = "30m", tags = {ConfigTag.BALANCER}, description = - "The amount of time in minutes to allow a single container to move " + + "The amount of time to allow a single container to move " + "from source to target.") private long moveTimeout = Duration.ofMinutes(30).toMillis(); @@ -101,7 +100,7 @@ public final class ContainerBalancerConfiguration { defaultValue = "70m", tags = { ConfigTag.BALANCER}, description = "The interval period between each " + "iteration of Container Balancer.") - private long balancingInterval; + private long balancingInterval = Duration.ofMinutes(70).toMillis(); @Config(key = "include.datanodes", type = ConfigType.STRING, defaultValue = "", tags = {ConfigTag.BALANCER}, description = "A list of Datanode " + @@ -127,46 +126,61 @@ public final class ContainerBalancerConfiguration { /** * Gets the threshold value for Container Balancer. 
* - * @return a fraction in the range 0 to 1 + * @return percentage value in the range 0 to 100 */ public double getThreshold() { return Double.parseDouble(threshold); } + public double getThresholdAsRatio() { + return Double.parseDouble(threshold) / 100; + } + /** * Sets the threshold value for Container Balancer. * - * @param threshold a fraction in the range 0 to 1 + * @param threshold a percentage value in the range 0 to 100 */ public void setThreshold(double threshold) { - if (threshold < 0 || threshold > 1) { + if (threshold < 0d || threshold >= 100d) { throw new IllegalArgumentException( - "Threshold must be a fraction in the range 0 to 1."); + "Threshold must be a percentage(double) in the range 0 to 100."); } this.threshold = String.valueOf(threshold); } /** - * Gets the idle iteration value for Container Balancer. + * Gets the iteration count for Container Balancer. A value of -1 means + * infinite number of iterations. * - * @return a idle iteration count larger than 0 + * @return a value greater than 0, or -1 */ - public int getIdleIteration() { - return idleIterations; + public int getIterations() { + return iterations; } /** - * Sets the idle iteration value for Container Balancer. + * Sets the number of iterations for Container Balancer. * - * @param count a idle iteration count larger than 0 + * @param count a value greater than 0, or -1 for running balancer infinitely */ - public void setIdleIteration(int count) { + public void setIterations(int count) { if (count < -1 || 0 == count) { throw new IllegalArgumentException( - "Idle iteration count must be larger than 0 or " + - "-1(for infinitely running)."); + "Iteration count must be greater than 0, or " + + "-1(for running balancer infinitely)."); } - this.idleIterations = count; + this.iterations = count; + } + + /** + * Gets the maximum percentage of healthy, in-service datanodes that will be + * involved in balancing in one iteration. + * + * @return percentage as an integer from 0 up to and including 100 + */ + public int getMaxDatanodesPercentageToInvolvePerIteration() { + return maxDatanodesPercentageToInvolvePerIteration; } /** @@ -188,37 +202,36 @@ public void setNetworkTopologyEnable(Boolean enable) { } /** - * Gets the ratio of maximum number of datanodes that will be involved in - * balancing by Container Balancer in one iteration to the total number of - * healthy, in-service nodes known to balancer. + * Gets the ratio of maximum datanodes involved in balancing to the total + * number of healthy, in-service datanodes known to SCM. * - * @return maximum datanodes to involve divided by total healthy, - * in-service nodes + * @return ratio as a double from 0 up to and including 1 */ public double getMaxDatanodesRatioToInvolvePerIteration() { - return Double.parseDouble(maxDatanodesRatioToInvolvePerIteration); + return maxDatanodesPercentageToInvolvePerIteration / 100d; } /** - * Sets the ratio of maximum number of datanodes that will be involved in - * balancing by Container Balancer in one iteration to the total number of - * healthy, in-service nodes known to balancer. + * Sets the maximum percentage of healthy, in-service datanodes that will be + * involved in balancing in one iteration. 
* - * @param maxDatanodesRatioToInvolvePerIteration number of datanodes to - * involve divided by total - * number of healthy, in - * service nodes + * @param maxDatanodesPercentageToInvolvePerIteration number of datanodes + * to involve divided by + * total number of + * healthy, in-service + * datanodes multiplied + * by 100 */ - public void setMaxDatanodesRatioToInvolvePerIteration( - double maxDatanodesRatioToInvolvePerIteration) { - if (maxDatanodesRatioToInvolvePerIteration < 0 || - maxDatanodesRatioToInvolvePerIteration > 1) { - throw new IllegalArgumentException("Max datanodes to involve ratio must" + - " be a double greater than equal to zero and lesser than equal to " + - "one."); + public void setMaxDatanodesPercentageToInvolvePerIteration( + int maxDatanodesPercentageToInvolvePerIteration) { + if (maxDatanodesPercentageToInvolvePerIteration < 0 || + maxDatanodesPercentageToInvolvePerIteration > 100) { + throw new IllegalArgumentException(String.format("Argument %d is " + + "illegal. Percentage must be from 0 up to and including 100.", + maxDatanodesPercentageToInvolvePerIteration)); } - this.maxDatanodesRatioToInvolvePerIteration = - String.valueOf(maxDatanodesRatioToInvolvePerIteration); + this.maxDatanodesPercentageToInvolvePerIteration = + maxDatanodesPercentageToInvolvePerIteration; } /** @@ -347,12 +360,12 @@ public String toString() { return String.format("Container Balancer Configuration values:%n" + "%-50s %s%n" + "%-50s %s%n" + - "%-50s %s%n" + - "%-50s %dGB%n"+ - "%-50s %dGB%n"+ + "%-50s %d%n" + + "%-50s %dGB%n" + + "%-50s %dGB%n" + "%-50s %dGB%n", "Key", "Value", "Threshold", - threshold, "Max Datanodes to Involve per Iteration(ratio)", - maxDatanodesRatioToInvolvePerIteration, + threshold, "Max Datanodes to Involve per Iteration(percent)", + maxDatanodesPercentageToInvolvePerIteration, "Max Size to Move per Iteration", maxSizeToMovePerIteration / OzoneConsts.GB, "Max Size Entering Target per Iteration", diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java index 591461d8875..540d26356df 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/FindSourceGreedy.java @@ -33,7 +33,7 @@ * The selection criteria for selecting source datanodes , the containers of * which will be moved out. */ -public class FindSourceGreedy implements FindSourceStrategy{ +public class FindSourceGreedy implements FindSourceStrategy { private static final Logger LOG = LoggerFactory.getLogger(FindSourceGreedy.class); private Map sizeLeavingNode; @@ -84,7 +84,7 @@ private void setConfiguration(ContainerBalancerConfiguration conf) { @Override public void increaseSizeLeaving(DatanodeDetails dui, long size) { Long currentSize = sizeLeavingNode.get(dui); - if(currentSize != null) { + if (currentSize != null) { sizeLeavingNode.put(dui, currentSize + size); //reorder according to the latest sizeLeavingNode potentialSources.add(nodeManager.getUsageInfo(dui)); @@ -114,7 +114,7 @@ public DatanodeDetails getNextCandidateSourceDataNode() { * data nodes. 
*/ @Override - public void removeCandidateSourceDataNode(DatanodeDetails dui){ + public void removeCandidateSourceDataNode(DatanodeDetails dui) { potentialSources.removeIf(a -> a.getDatanodeDetails().equals(dui)); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java index c799b02eee1..bf0ea7cb38e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java @@ -47,7 +47,7 @@ private ContainerPlacementPolicyFactory() { public static PlacementPolicy getPolicy( ConfigurationSource conf, final NodeManager nodeManager, NetworkTopology clusterMap, final boolean fallback, - SCMContainerPlacementMetrics metrics) throws SCMException{ + SCMContainerPlacementMetrics metrics) throws SCMException { final Class placementClass = conf .getClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, OZONE_SCM_CONTAINER_PLACEMENT_IMPL_DEFAULT, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java index 1ca68bd3ebf..22bdf21df93 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementMetrics.java @@ -33,7 +33,7 @@ /** * This class is for maintaining Topology aware container placement statistics. */ -@Metrics(about="SCM Container Placement Metrics", context = OzoneConsts.OZONE) +@Metrics(about = "SCM Container Placement Metrics", context = OzoneConsts.OZONE) public class SCMContainerPlacementMetrics implements MetricsSource { public static final String SOURCE_NAME = SCMContainerPlacementMetrics.class.getSimpleName(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index d46713b602c..2631a1d951d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -55,7 +55,7 @@ public final class SCMContainerPlacementRackAware private final NetworkTopology networkTopology; private boolean fallback; private static final int RACK_LEVEL = 1; - private static final int MAX_RETRY= 3; + private static final int MAX_RETRY = 3; private final SCMContainerPlacementMetrics metrics; // Used to check the placement policy is validated in the parent class private static final int REQUIRED_RACKS = 2; @@ -118,7 +118,7 @@ public List chooseDatanodes( mutableFavoredNodes.addAll(favoredNodes); mutableFavoredNodes.removeAll(excludedNodes); } - int favoredNodeNum = mutableFavoredNodes == null? 
0 : + int favoredNodeNum = mutableFavoredNodes == null ? 0 : mutableFavoredNodes.size(); List chosenNodes = new ArrayList<>(); @@ -195,7 +195,7 @@ public List chooseDatanodes( // in the same rack, then choose nodes on different racks, otherwise, // choose one on the same rack as one of excluded nodes, remaining chosen // are on different racks. - for(int i = 0; i < excludedNodesCount; i++) { + for (int i = 0; i < excludedNodesCount; i++) { for (int j = i + 1; j < excludedNodesCount; j++) { if (networkTopology.isSameParent( excludedNodes.get(i), excludedNodes.get(j))) { @@ -257,7 +257,7 @@ private DatanodeDetails chooseNode(List excludedNodes, int maxRetry = MAX_RETRY; List excludedNodesForCapacity = null; boolean isFallbacked = false; - while(true) { + while (true) { metrics.incrDatanodeChooseAttemptCount(); DatanodeDetails node = null; if (affinityNodes != null) { @@ -348,8 +348,8 @@ private List chooseNodes(List excludedNodes, Preconditions.checkArgument(chosenNodes != null); List excludedNodeList = excludedNodes != null ? excludedNodes : chosenNodes; - int favoredNodeNum = favoredNodes == null? 0 : favoredNodes.size(); - while(true) { + int favoredNodeNum = favoredNodes == null ? 0 : favoredNodes.size(); + while (true) { DatanodeDetails favoredNode = favoredNodeNum > favorIndex ? favoredNodes.get(favorIndex) : null; DatanodeDetails chosenNode; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java index 51948291a4b..f9d2ade8fd9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/metrics/SCMMetrics.java @@ -28,7 +28,7 @@ /** * This class is for maintaining StorageContainerManager statistics. 
*/ -@Metrics(about="Storage Container Manager Metrics", context="dfs") +@Metrics(about = "Storage Container Manager Metrics", context = "dfs") public class SCMMetrics { public static final String SOURCE_NAME = SCMMetrics.class.getSimpleName(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java index 9a70f6974b8..0f828aef1a6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManagerMetrics.java @@ -16,7 +16,10 @@ */ package org.apache.hadoop.hdds.scm.container.replication; +import com.google.common.base.CaseFormat; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ReplicationManager; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.metrics2.MetricsCollector; import org.apache.hadoop.metrics2.MetricsInfo; import org.apache.hadoop.metrics2.MetricsRecordBuilder; @@ -30,6 +33,13 @@ import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.ozone.OzoneConsts; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.Map; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState; + /** * Class contains metrics related to ReplicationManager. */ @@ -51,6 +61,28 @@ public final class ReplicationManagerMetrics implements MetricsSource { "InflightMove", "Tracked inflight container move requests."); + // Setup metric names and descriptions for Container Lifecycle states + private static final Map LIFECYCLE_STATE_METRICS + = Collections.unmodifiableMap( + new LinkedHashMap() {{ + for (LifeCycleState s : LifeCycleState.values()) { + String name = CaseFormat.UPPER_UNDERSCORE + .to(CaseFormat.UPPER_CAMEL, s.toString()); + String metric = "Num" + name + "Containers"; + String description = "Containers in " + name + " state"; + put(s, Interns.info(metric, description)); + } + }}); + + // Setup metric names and descriptions for + private static final Map + CONTAINER_HEALTH_STATE_METRICS = Collections.unmodifiableMap( + new LinkedHashMap() {{ + for (HealthState s : HealthState.values()) { + put(s, Interns.info(s.getMetricName(), s.getDescription())); + } + }}); + @Metric("Number of replication commands sent.") private MutableCounterLong numReplicationCmdsSent; @@ -110,6 +142,16 @@ public void getMetrics(MetricsCollector collector, boolean all) { .addGauge(INFLIGHT_DELETION, getInflightDeletion()) .addGauge(INFLIGHT_MOVE, getInflightMove()); + ReplicationManagerReport report = replicationManager.getContainerReport(); + for (Map.Entry e : + LIFECYCLE_STATE_METRICS.entrySet()) { + builder.addGauge(e.getValue(), report.getStat(e.getKey())); + } + for (Map.Entry e : + CONTAINER_HEALTH_STATE_METRICS.entrySet()) { + builder.addGauge(e.getValue(), report.getStat(e.getKey())); + } + numReplicationCmdsSent.snapshot(builder, all); numReplicationCmdsCompleted.snapshot(builder, all); numReplicationCmdsTimeout.snapshot(builder, all); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java index bbf1c700d19..c254dd723e5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java @@ -358,7 +358,7 @@ public NavigableSet getMatchingContainerIDs( final ContainerQueryKey queryKey = new ContainerQueryKey(state, owner, repConfig); - if(resultCache.containsKey(queryKey)){ + if (resultCache.containsKey(queryKey)) { return resultCache.get(queryKey); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java index 5023e93a9ea..edea6816aea 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/HASecurityUtils.java @@ -216,7 +216,7 @@ private static void getPrimarySCMSelfSignedCert(CertificateClient client, // Persist scm cert serial ID. scmStorageConfig.setScmCertSerialId(subSCMCertHolder.getSerialNumber() .toString()); - } catch (InterruptedException | ExecutionException| IOException | + } catch (InterruptedException | ExecutionException | IOException | CertificateException e) { LOG.error("Error while fetching/storing SCM signed certificate.", e); Thread.currentThread().interrupt(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java index e949850f6ca..4154b62125f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java @@ -133,7 +133,7 @@ private static void setRaftRpcProperties(final RaftProperties properties, ScmConfigKeys.OZONE_SCM_HA_RATIS_LEADER_ELECTION_TIMEOUT, ScmConfigKeys. 
OZONE_SCM_HA_RATIS_LEADER_ELECTION_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS)+200L, + TimeUnit.MILLISECONDS) + 200L, TimeUnit.MILLISECONDS)); Rpc.setSlownessTimeout(properties, TimeDuration.valueOf( ozoneConf.getTimeDuration( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java index b07ee54147b..bb12df6ec0f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java @@ -68,7 +68,7 @@ public Object invoke(final Object proxy, final Method method, invokeLocal(method, args); LOG.debug("Call: {} took {} ms", method, Time.monotonicNow() - startTime); return result; - } catch(InvocationTargetException iEx) { + } catch (InvocationTargetException iEx) { throw iEx.getCause(); } } @@ -88,7 +88,8 @@ private Object invokeLocal(Method method, Object[] args) */ private Object invokeRatis(Method method, Object[] args) throws Exception { - long startTime = Time.monotonicNowNanos(); + LOG.trace("Invoking method {} on target {}", method, ratisHandler); + // TODO: Add metric here to track time taken by Ratis Preconditions.checkNotNull(ratisHandler); SCMRatisRequest scmRatisRequest = SCMRatisRequest.of(requestType, method.getName(), method.getParameterTypes(), args); @@ -99,7 +100,7 @@ private Object invokeRatis(Method method, Object[] args) // via ratis. So, in this special scenario we use RaftClient. final SCMRatisResponse response; if (method.getName().equals("storeValidCertificate") && - args[args.length -1].equals(HddsProtos.NodeType.SCM)) { + args[args.length - 1].equals(HddsProtos.NodeType.SCM)) { response = HASecurityUtils.submitScmCertsToRatis( ratisHandler.getDivision().getGroup(), @@ -110,8 +111,6 @@ private Object invokeRatis(Method method, Object[] args) response = ratisHandler.submitRequest( scmRatisRequest); } - LOG.info("Invoking method {} on target {}, cost {}us", - method, ratisHandler, (Time.monotonicNowNanos() - startTime) / 1000.0); if (response.isSuccess()) { return response.getResult(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java index 38215754590..a69f2cbbb43 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -73,6 +73,7 @@ public SCMHAManagerImpl(final ConfigurationSource conf, final StorageContainerManager scm) throws IOException { this.conf = conf; this.scm = scm; + this.exitManager = new ExitManager(); if (SCMHAUtils.isSCMHAEnabled(conf)) { this.transactionBuffer = new SCMHADBTransactionBufferImpl(scm); this.ratisServer = new SCMRatisServerImpl(conf, scm, @@ -258,7 +259,7 @@ public TermIndex installCheckpoint(Path checkpointLocation, throw e; } - File dbBackup = null; + File dbBackup; try { dbBackup = HAUtils .replaceDBWithCheckpoint(lastAppliedIndex, oldDBLocation, @@ -266,29 +267,41 @@ public TermIndex installCheckpoint(Path checkpointLocation, LOG.info("Replaced DB with checkpoint, term: {}, index: {}", term, lastAppliedIndex); } catch (Exception e) { + // If we are not able to install latest checkpoint we should throw + // this exception. 
In this way reinitialize can propagate the exception to
+      // Ratis, which will handle it properly.
       LOG.error("Failed to install Snapshot as SCM failed to replace"
-          + " DB with downloaded checkpoint. Reloading old SCM state.", e);
+          + " DB with downloaded checkpoint. Checkpoint transaction {}",
+          checkpointTxnInfo.getTransactionIndex(), e);
+      throw e;
     }
+    // Reload the DB store with the new checkpoint.
-    // Restart (unpause) the state machine and update its last applied index
-    // to the installed checkpoint's snapshot index.
     try {
       reloadSCMState();
       LOG.info("Reloaded SCM state with Term: {} and Index: {}", term,
           lastAppliedIndex);
     } catch (Exception ex) {
+      LOG.info("Failed to reload SCM state with Term: {} and Index: {}", term,
+          lastAppliedIndex);
+      // revert to the old db, since the new db may be a corrupted one
+      // so that SCM can restart from the old db.
       try {
-        // revert to the old db, since the new db may be a corrupted one,
-        // so that SCM can restart from the old db.
         if (dbBackup != null) {
-          dbBackup = HAUtils
-              .replaceDBWithCheckpoint(lastAppliedIndex, oldDBLocation,
+          dbBackup =
+              HAUtils.replaceDBWithCheckpoint(lastAppliedIndex, oldDBLocation,
                   dbBackup.toPath(), OzoneConsts.SCM_DB_BACKUP_PREFIX);
-          startServices();
+          LOG.error("Replacing SCM state with Term: {} and Index: {}",
+              termIndex.getTerm(), termIndex.getIndex());
+          // Reload the old DB before terminating so that SCM stops from a
+          // consistent state. Tests also assume this re-verification after
+          // loading a corrupt DB; without the reloadSCMState call they fail
+          // with an NPE when locating the DB.
+          reloadSCMState();
         }
       } finally {
-        String errorMsg =
-            "Failed to reload SCM state and instantiate services.";
+        String errorMsg = "Failed to reload SCM state and instantiate " +
+            "services.";
         exitManager.exitSystem(1, errorMsg, ex, LOG);
       }
     }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
index f63930056ba..b48dfb616dc 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
@@ -258,7 +258,8 @@ public List<String> getRatisRoles() throws IOException {
         peer.getAddress().concat(isLocal ?
":".concat(RaftProtos.RaftPeerRole.LEADER.toString()) : ":".concat(RaftProtos.RaftPeerRole.FOLLOWER.toString())) - .concat(":".concat(peer.getId().toString())))); + .concat(":".concat(peer.getId().toString())) + .concat(":".concat(peerInetAddress.getHostAddress())))); } return ratisRoles; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java index 8fa1866d5b7..9aeda10225f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java @@ -357,7 +357,7 @@ public void pause() { } @Override - public void reinitialize() { + public void reinitialize() throws IOException { Preconditions.checkNotNull(installingDBCheckpoint); DBCheckpoint checkpoint = installingDBCheckpoint; @@ -369,8 +369,8 @@ public void reinitialize() { termIndex = scm.getScmHAManager().installCheckpoint(checkpoint); } catch (Exception e) { - LOG.error("Failed to reinitialize SCMStateMachine."); - return; + LOG.error("Failed to reinitialize SCMStateMachine.", e); + throw new IOException(e); } // re-initialize the DBTransactionBuffer and update the lastAppliedIndex. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java index 95d906e7382..9fb771b7a70 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/io/CodecFactory.java @@ -47,7 +47,7 @@ public final class CodecFactory { codecs.put(X509Certificate.class, new X509CertificateCodec()); } - private CodecFactory() {} + private CodecFactory() { } public static Codec getCodec(Class type) throws InvalidProtocolBufferException { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java index 799e1282027..de7fcb0b746 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java @@ -238,11 +238,11 @@ public Table getCRLSequenceIdTable() { @Override public TableIterator getAllCerts(CertificateStore.CertType certType) { - if(certType == CertificateStore.CertType.VALID_CERTS) { + if (certType == CertificateStore.CertType.VALID_CERTS) { return validCertsTable.iterator(); } - if(certType == CertificateStore.CertType.REVOKED_CERTS) { + if (certType == CertificateStore.CertType.REVOKED_CERTS) { return revokedCertsTable.iterator(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java index 9bfa7d6c4c0..bf2559b8ab1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/X509CertificateCodec.java @@ -44,7 +44,7 @@ public byte[] toPersistedFormat(X509Certificate object) throws IOException { @Override public X509Certificate fromPersistedFormat(byte[] rawData) throws IOException { - try{ 
+ try { String s = new String(rawData, StandardCharsets.UTF_8); return CertificateCodec.getX509Certificate(s); } catch (CertificateException exp) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java index eb6dc0d424f..aa930251c4f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/CommandQueue.java @@ -91,7 +91,7 @@ List getCommand(final UUID datanodeUuid) { try { Commands cmds = commandMap.remove(datanodeUuid); List cmdList = null; - if(cmds != null) { + if (cmds != null) { cmdList = cmds.getCommands(); commandsInQueue -= cmdList.size() > 0 ? cmdList.size() : 0; // A post condition really. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java index 27a84deaffe..47d7c534698 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeDecommissionManager.java @@ -85,20 +85,20 @@ public int getPort() { return port; } - private void parseHostname() throws InvalidHostStringException{ + private void parseHostname() throws InvalidHostStringException { try { // A URI *must* have a scheme, so just create a fake one - URI uri = new URI("empty://"+rawHostname.trim()); + URI uri = new URI("empty://" + rawHostname.trim()); this.hostname = uri.getHost(); this.port = uri.getPort(); if (this.hostname == null) { - throw new InvalidHostStringException("The string "+rawHostname+ + throw new InvalidHostStringException("The string " + rawHostname + " does not contain a value hostname or hostname:port definition"); } } catch (URISyntaxException e) { throw new InvalidHostStringException( - "Unable to parse the hoststring "+rawHostname, e); + "Unable to parse the hoststring " + rawHostname, e); } } } @@ -138,7 +138,7 @@ private List mapHostnamesToDatanodes(List hosts) results.add(found.get(0)); } else if (found.size() > 1) { DatanodeDetails match = null; - for(DatanodeDetails dn : found) { + for (DatanodeDetails dn : found) { if (validateDNPortMatch(host.getPort(), dn)) { match = dn; break; @@ -231,7 +231,7 @@ public synchronized List decommissionNodes( // NodeNotFoundException here expect if the node is remove in the // very short window between validation and starting decom. Therefore // log a warning and ignore the exception - LOG.warn("The host {} was not found in SCM. Ignoring the request to "+ + LOG.warn("The host {} was not found in SCM. Ignoring the request to " + "decommission it", dn.getHostName()); errors.add(new DatanodeAdminError(dn.getHostName(), "The host was not found in SCM")); @@ -274,12 +274,12 @@ public synchronized void startDecommission(DatanodeDetails dn) dn, NodeOperationalState.DECOMMISSIONING); monitor.startMonitoring(dn); } else if (nodeStatus.isDecommission()) { - LOG.info("Start Decommission called on node {} in state {}. Nothing to "+ + LOG.info("Start Decommission called on node {} in state {}. 
Nothing to " + "do.", dn, opState); } else { LOG.error("Cannot decommission node {} in state {}", dn, opState); - throw new InvalidNodeStateException("Cannot decommission node "+ - dn +" in state "+ opState); + throw new InvalidNodeStateException("Cannot decommission node " + + dn + " in state " + opState); } } @@ -296,7 +296,7 @@ public synchronized List recommissionNodes( // NodeNotFoundException here expect if the node is remove in the // very short window between validation and starting decom. Therefore // log a warning and ignore the exception - LOG.warn("Host {} was not found in SCM. Ignoring the request to "+ + LOG.warn("Host {} was not found in SCM. Ignoring the request to " + "recommission it.", dn.getHostName()); errors.add(new DatanodeAdminError(dn.getHostName(), "The host was not found in SCM")); @@ -306,7 +306,7 @@ public synchronized List recommissionNodes( } public synchronized void recommission(DatanodeDetails dn) - throws NodeNotFoundException{ + throws NodeNotFoundException { NodeStatus nodeStatus = getNodeStatus(dn); NodeOperationalState opState = nodeStatus.getOperationalState(); if (opState != NodeOperationalState.IN_SERVICE) { @@ -315,7 +315,7 @@ public synchronized void recommission(DatanodeDetails dn) monitor.stopMonitoring(dn); LOG.info("Queued node {} for recommission", dn); } else { - LOG.info("Recommission called on node {} with state {}. "+ + LOG.info("Recommission called on node {} with state {}. " + "Nothing to do.", dn, opState); } } @@ -333,7 +333,7 @@ public synchronized List startMaintenanceNodes( // NodeNotFoundException here expect if the node is remove in the // very short window between validation and starting decom. Therefore // log a warning and ignore the exception - LOG.warn("The host {} was not found in SCM. Ignoring the request to "+ + LOG.warn("The host {} was not found in SCM. Ignoring the request to " + "start maintenance on it", dn.getHostName()); } catch (InvalidNodeStateException e) { errors.add(new DatanodeAdminError(dn.getHostName(), e.getMessage())); @@ -360,12 +360,12 @@ public synchronized void startMaintenance(DatanodeDetails dn, int endInHours) monitor.startMonitoring(dn); LOG.info("Starting Maintenance for node {}", dn); } else if (nodeStatus.isMaintenance()) { - LOG.info("Starting Maintenance called on node {} with state {}. "+ + LOG.info("Starting Maintenance called on node {} with state {}. 
" + "Nothing to do.", dn, opState); } else { LOG.error("Cannot start maintenance on node {} in state {}", dn, opState); - throw new InvalidNodeStateException("Cannot start maintenance on node "+ - dn +" in state "+ opState); + throw new InvalidNodeStateException("Cannot start maintenance on node " + + dn + " in state " + opState); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java index 4e1a9649ea4..6127bb0b4d7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java @@ -324,7 +324,7 @@ default Collection getPeerList(DatanodeDetails dn) { return null; } - default HDDSLayoutVersionManager getLayoutVersionManager(){ + default HDDSLayoutVersionManager getLayoutVersionManager() { return null; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index e752454e009..85dd6a06737 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -728,7 +728,7 @@ public void checkNodesHealth() { (lastHbTime) -> lastHbTime < staleNodeDeadline; try { - for(DatanodeInfo node : nodeStateMap.getAllDatanodeInfos()) { + for (DatanodeInfo node : nodeStateMap.getAllDatanodeInfos()) { NodeStatus status = nodeStateMap.getNodeStatus(node.getUuid()); switch (status.getHealth()) { case HEALTHY: diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java index a9164c72973..03dd2e2b6de 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStatus.java @@ -209,8 +209,8 @@ public int hashCode() { @Override public String toString() { - return "OperationalState: "+operationalState+" Health: "+health+ - " OperationStateExpiry: "+opStateExpiryEpochSeconds; + return "OperationalState: " + operationalState + " Health: " + health + + " OperationStateExpiry: " + opStateExpiryEpochSeconds; } } \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 68c26972371..8899b13b6f4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -268,7 +268,7 @@ public NodeStatus getNodeStatus(DatanodeDetails datanodeDetails) */ @Override public void setNodeOperationalState(DatanodeDetails datanodeDetails, - NodeOperationalState newState) throws NodeNotFoundException{ + NodeOperationalState newState) throws NodeNotFoundException { setNodeOperationalState(datanodeDetails, newState, 0); } @@ -283,7 +283,7 @@ public void setNodeOperationalState(DatanodeDetails datanodeDetails, @Override public void setNodeOperationalState(DatanodeDetails datanodeDetails, NodeOperationalState newState, long opStateExpiryEpocSec) - throws NodeNotFoundException{ + 
throws NodeNotFoundException { nodeStateManager.setNodeOperationalState( datanodeDetails, newState, opStateExpiryEpocSec); } @@ -612,7 +612,7 @@ public void processLayoutVersionReport(DatanodeDetails datanodeDetails, // send Finalize command multiple times. scmNodeEventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND, new CommandForDatanode<>(datanodeDetails.getUuid(), finalizeCmd)); - } catch(NotLeaderException ex) { + } catch (NotLeaderException ex) { LOG.warn("Skip sending finalize upgrade command since current SCM is" + "not leader.", ex); } @@ -764,7 +764,7 @@ public Map> getNodeCount() { for (DatanodeInfo dni : nodeStateManager.getAllNodes()) { NodeStatus status = dni.getNodeStatus(); nodes.get(status.getOperationalState().name()) - .compute(status.getHealth().name(), (k, v) -> v+1); + .compute(status.getHealth().name(), (k, v) -> v + 1); } return nodes; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java index 6eb73595d2e..b727580d1b7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeMetrics.java @@ -129,12 +129,12 @@ public void getMetrics(MetricsCollector collector, boolean all) { * ... */ MetricsRecordBuilder metrics = collector.addRecord(registry.info()); - for(Map.Entry> e : nodeCount.entrySet()) { - for(Map.Entry h : e.getValue().entrySet()) { + for (Map.Entry> e : nodeCount.entrySet()) { + for (Map.Entry h : e.getValue().entrySet()) { metrics.addGauge( Interns.info( - StringUtils.camelize(e.getKey()+"_"+h.getKey()+"_nodes"), - "Number of "+e.getKey()+" "+h.getKey()+" datanodes"), + StringUtils.camelize(e.getKey() + "_" + h.getKey() + "_nodes"), + "Number of " + e.getKey() + " " + h.getKey() + " datanodes"), h.getValue()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java index 1b0e5b56e77..ed45ed06ebe 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeStorageStatMap.java @@ -136,7 +136,7 @@ private void registerMXBean() { //TODO: Unregister call should happen as a part of SCMNodeManager shutdown. private void unregisterMXBean() { - if(this.scmNodeStorageInfoBean != null) { + if (this.scmNodeStorageInfoBean != null) { MBeans.unregister(this.scmNodeStorageInfoBean); this.scmNodeStorageInfoBean = null; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java index 57a377d998f..b934d977fa6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2ObjectsMap.java @@ -99,7 +99,7 @@ public void removeDatanode(UUID datanodeID) { Set getObjects(UUID datanode) { Preconditions.checkNotNull(datanode); final Set s = dn2ObjectMap.get(datanode); - return s != null? Collections.unmodifiableSet(s): Collections.emptySet(); + return s != null ? 
Collections.unmodifiableSet(s) : Collections.emptySet(); } public ReportResult.ReportResultBuilder newBuilder() { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java index 0a3e1377d6f..3146e8fede4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/NodeStateMap.java @@ -330,7 +330,7 @@ public void addContainer(final UUID uuid, } public void setContainers(UUID uuid, Set containers) - throws NodeNotFoundException{ + throws NodeNotFoundException { lock.writeLock().lock(); try { checkIfNodeExist(uuid); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java index 9b9e206e05a..954d212ac22 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerImpl.java @@ -562,7 +562,7 @@ public void close() throws IOException { backgroundPipelineCreator.stop(); } - if(pmInfoBean != null) { + if (pmInfoBean != null) { MBeans.unregister(this.pmInfoBean); pmInfoBean = null; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java index 85ea5a558d7..64815a92d3e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java @@ -90,12 +90,12 @@ public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode, for (PipelineReport report : pipelineReport.getPipelineReportList()) { try { processPipelineReport(report, dn, publisher); - } catch(NotLeaderException ex) { + } catch (NotLeaderException ex) { // Avoid NotLeaderException logging which happens when processing // pipeline report on followers. } catch (PipelineNotFoundException e) { LOGGER.error("Could not find pipeline {}", report.getPipelineID()); - } catch(IOException e) { + } catch (IOException e) { LOGGER.error("Could not process pipeline report={} from dn={}.", report, dn, e); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java index 9589eb9c333..0b93f4de8c7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerImpl.java @@ -258,10 +258,8 @@ public void updatePipelineState( HddsProtos.PipelineID pipelineIDProto, HddsProtos.PipelineState newState) throws IOException { PipelineID pipelineID = PipelineID.getFromProtobuf(pipelineIDProto); - Pipeline.PipelineState oldState = null; lock.writeLock().lock(); try { - oldState = getPipeline(pipelineID).getPipelineState(); // null check is here to prevent the case where SCM store // is closed but the staleNode handlers/pipeline creations // still try to access it. 
@@ -275,9 +273,8 @@ public void updatePipelineState( LOG.warn("Pipeline {} is not found in the pipeline Map. Pipeline" + " may have been deleted already.", pipelineID); } catch (IOException ex) { - LOG.warn("Pipeline {} state update failed", pipelineID); - // revert back to old state in memory - pipelineStateMap.updatePipelineState(pipelineID, oldState); + LOG.error("Pipeline {} state update failed", pipelineID); + throw ex; } finally { lock.writeLock().unlock(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java index 847b50e4c7e..bbdabf0c476 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java @@ -91,7 +91,7 @@ static void destroyPipeline(DatanodeDetails dn, PipelineID pipelineID, ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf); final RaftPeer p = RatisHelper.toRaftPeer(dn); - try(RaftClient client = RatisHelper + try (RaftClient client = RatisHelper .newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p, retryPolicy, grpcTlsConfig, ozoneConf)) { client.getGroupManagementApi(p.getId()) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java index 356d047a5e5..f130eedef50 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/WritableContainerFactory.java @@ -44,7 +44,7 @@ public WritableContainerFactory(StorageContainerManager scm) { public ContainerInfo getContainer(final long size, ReplicationConfig repConfig, String owner, ExcludeList excludeList) throws IOException { - switch(repConfig.getReplicationType()) { + switch (repConfig.getReplicationType()) { case STAND_ALONE: return standaloneProvider .getContainer(size, repConfig, owner, excludeList); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 3a471fbb0b0..5d6ee5b3117 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -43,6 +43,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.FinalizeScmUpgradeResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerReplicasResponseProto; import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerTokenRequestProto; @@ -53,6 +55,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetExistContainerWithPipelinesInBatchRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetExistContainerWithPipelinesInBatchResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerCountResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetSafeModeRuleStatusesRequestProto; @@ -67,6 +70,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.QueryUpgradeFinalizationProgressResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.RecommissionNodesRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.RecommissionNodesResponseProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerReportRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerReportResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMCloseContainerRequestProto; @@ -320,6 +325,13 @@ public ScmContainerLocationResponse processRequest( .setReplicationManagerStatusResponse(getReplicationManagerStatus( request.getSeplicationManagerStatusRequest())) .build(); + case GetReplicationManagerReport: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setGetReplicationManagerReportResponse(getReplicationManagerReport( + request.getReplicationManagerReportRequest())) + .build(); case StartContainerBalancer: return ScmContainerLocationResponse.newBuilder() .setCmdType(request.getCmdType()) @@ -397,6 +409,20 @@ public ScmContainerLocationResponse processRequest( .setDatanodeUsageInfoResponse(getDatanodeUsageInfo( request.getDatanodeUsageInfoRequest())) .build(); + case GetContainerCount: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setGetContainerCountResponse(getContainerCount( + request.getGetContainerCountRequest())) + .build(); + case GetContainerReplicas: + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()) + .setStatus(Status.OK) + .setGetContainerReplicasResponse(getContainerReplicas( + request.getGetContainerReplicasRequest())) + .build(); default: throw new IllegalArgumentException( "Unknown command type: " + request.getCmdType()); @@ -408,6 
+434,14 @@ public ScmContainerLocationResponse processRequest( } } + public GetContainerReplicasResponseProto getContainerReplicas( + GetContainerReplicasRequestProto request) throws IOException { + List replicas + = impl.getContainerReplicas(request.getContainerID()); + return GetContainerReplicasResponseProto.newBuilder() + .addAllContainerReplica(replicas).build(); + } + public ContainerResponseProto allocateContainer(ContainerRequestProto request, int clientVersion) throws IOException { ContainerWithPipeline cp = impl @@ -706,44 +740,60 @@ public ReplicationManagerStatusResponseProto getReplicationManagerStatus( .setIsRunning(impl.getReplicationManagerStatus()).build(); } + public ReplicationManagerReportResponseProto getReplicationManagerReport( + ReplicationManagerReportRequestProto request) throws IOException { + return ReplicationManagerReportResponseProto.newBuilder() + .setReport(impl.getReplicationManagerReport().toProtobuf()) + .build(); + } + public StartContainerBalancerResponseProto startContainerBalancer( StartContainerBalancerRequestProto request) throws IOException { Optional threshold = Optional.empty(); - Optional idleiterations = Optional.empty(); - Optional maxDatanodesRatioToInvolvePerIteration = Optional.empty(); + Optional iterations = Optional.empty(); + Optional maxDatanodesPercentageToInvolvePerIteration = + Optional.empty(); Optional maxSizeToMovePerIterationInGB = Optional.empty(); Optional maxSizeEnteringTargetInGB = Optional.empty(); Optional maxSizeLeavingSourceInGB = Optional.empty(); - if(request.hasThreshold()) { + if (request.hasThreshold()) { threshold = Optional.of(request.getThreshold()); } - if(request.hasIdleiterations()) { - idleiterations = Optional.of(request.getIdleiterations()); + + if (request.hasIterations()) { + iterations = Optional.of(request.getIterations()); + } else if (request.hasIdleiterations()) { + iterations = Optional.of(request.getIdleiterations()); } - if(request.hasMaxDatanodesRatioToInvolvePerIteration()) { - maxDatanodesRatioToInvolvePerIteration = - Optional.of(request.getMaxDatanodesRatioToInvolvePerIteration()); + + if (request.hasMaxDatanodesPercentageToInvolvePerIteration()) { + maxDatanodesPercentageToInvolvePerIteration = + Optional.of(request.getMaxDatanodesPercentageToInvolvePerIteration()); + } else if (request.hasMaxDatanodesRatioToInvolvePerIteration()) { + maxDatanodesPercentageToInvolvePerIteration = + Optional.of( + (int) (request.getMaxDatanodesRatioToInvolvePerIteration() * + 100)); } - if(request.hasMaxSizeToMovePerIterationInGB()) { + + if (request.hasMaxSizeToMovePerIterationInGB()) { maxSizeToMovePerIterationInGB = Optional.of(request.getMaxSizeToMovePerIterationInGB()); } - if(request.hasMaxSizeEnteringTargetInGB()) { + if (request.hasMaxSizeEnteringTargetInGB()) { maxSizeEnteringTargetInGB = Optional.of(request.getMaxSizeEnteringTargetInGB()); } - if(request.hasMaxSizeLeavingSourceInGB()) { + if (request.hasMaxSizeLeavingSourceInGB()) { maxSizeLeavingSourceInGB = Optional.of(request.getMaxSizeLeavingSourceInGB()); } - - return StartContainerBalancerResponseProto.newBuilder(). 
setStart(impl.startContainerBalancer(threshold, - idleiterations, maxDatanodesRatioToInvolvePerIteration, + iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, maxSizeLeavingSourceInGB)).build(); } @@ -831,4 +881,12 @@ public DatanodeUsageInfoResponseProto getDatanodeUsageInfo( .build(); } + public GetContainerCountResponseProto getContainerCount( + StorageContainerLocationProtocolProtos.GetContainerCountRequestProto + request) throws IOException { + + return GetContainerCountResponseProto.newBuilder() + .setContainerCount(impl.getContainerCount()) + .build(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java index ef9d6f438a5..2a1c8956e94 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java @@ -42,7 +42,7 @@ * Class defining Safe mode exit criteria for Containers. */ public class ContainerSafeModeRule extends - SafeModeExitRule{ + SafeModeExitRule { public static final Logger LOG = LoggerFactory.getLogger(ContainerSafeModeRule.class); @@ -115,7 +115,7 @@ protected synchronized void process( reportsProto.getReport().getReportsList().forEach(c -> { if (containerMap.containsKey(c.getContainerID())) { - if(containerMap.remove(c.getContainerID()) != null) { + if (containerMap.remove(c.getContainerID()) != null) { containerWithMinReplicas.getAndAdd(1); getSafeModeMetrics() .incCurrentContainersWithOneReplicaReportedCount(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java index 0c4ce84a9ce..b03fedb647e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java @@ -33,7 +33,7 @@ * registered with SCM. */ public class DataNodeSafeModeRule extends - SafeModeExitRule{ + SafeModeExitRule { // Min DataNodes required to exit safe mode. 
private int requiredDns; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java index c7b831bfe1e..fb4ba7db65e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java @@ -204,7 +204,7 @@ public List allocateBlock( ); throw ex; } finally { - if(auditSuccess) { + if (auditSuccess) { AUDIT.logWriteSuccess( buildAuditMessageForSuccess(SCMAction.ALLOCATE_BLOCK, auditMap) ); @@ -274,7 +274,7 @@ public List deleteKeyBlocks( @Override public ScmInfo getScmInfo() throws IOException { boolean auditSuccess = true; - try{ + try { ScmInfo.Builder builder = new ScmInfo.Builder() .setClusterId(scm.getScmStorageConfig().getClusterID()) @@ -287,7 +287,7 @@ public ScmInfo getScmInfo() throws IOException { ); throw ex; } finally { - if(auditSuccess) { + if (auditSuccess) { AUDIT.logReadSuccess( buildAuditMessageForSuccess(SCMAction.GET_SCM_INFO, null) ); @@ -305,7 +305,7 @@ public boolean addSCM(AddSCMRequest request) throws IOException { auditMap.put("cluster", String.valueOf(request.getClusterId())); auditMap.put("addr", String.valueOf(request.getRatisAddr())); boolean auditSuccess = true; - try{ + try { return scm.getScmHAManager().addSCM(request); } catch (Exception ex) { auditSuccess = false; @@ -314,7 +314,7 @@ public boolean addSCM(AddSCMRequest request) throws IOException { ); throw ex; } finally { - if(auditSuccess) { + if (auditSuccess) { AUDIT.logReadSuccess( buildAuditMessageForSuccess(SCMAction.ADD_SCM, auditMap) ); @@ -326,12 +326,12 @@ public boolean addSCM(AddSCMRequest request) throws IOException { public List sortDatanodes(List nodes, String clientMachine) throws IOException { boolean auditSuccess = true; - try{ + try { NodeManager nodeManager = scm.getScmNodeManager(); Node client = null; List possibleClients = nodeManager.getNodesByAddress(clientMachine); - if (possibleClients.size()>0){ + if (possibleClients.size() > 0) { client = possibleClients.get(0); } List nodeList = new ArrayList(); @@ -353,7 +353,7 @@ public List sortDatanodes(List nodes, ); throw ex; } finally { - if(auditSuccess) { + if (auditSuccess) { AUDIT.logReadSuccess( buildAuditMessageForSuccess(SCMAction.SORT_DATANODE, null) ); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 4006a17eae0..9388a981382 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.hdds.scm.container.balancer.ContainerBalancerConfiguration; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; @@ -221,7 +222,7 @@ public ContainerInfo getContainer(long containerID) throws IOException { ); throw ex; } 
finally { - if(auditSuccess) { + if (auditSuccess) { AUDIT.logReadSuccess( buildAuditMessageForSuccess(SCMAction.GET_CONTAINER, auditMap) ); @@ -289,6 +290,28 @@ public ContainerWithPipeline getContainerWithPipeline(long containerID) } } + @Override + public List + getContainerReplicas(long containerId) throws IOException { + List results = new ArrayList<>(); + + Set replicas = getScm().getContainerManager() + .getContainerReplicas(ContainerID.valueOf(containerId)); + for (ContainerReplica r : replicas) { + results.add( + HddsProtos.SCMContainerReplicaProto.newBuilder() + .setContainerID(containerId) + .setState(r.getState().toString()) + .setDatanodeDetails(r.getDatanodeDetails().getProtoBufMessage()) + .setBytesUsed(r.getBytesUsed()) + .setPlaceOfBirth(r.getOriginDatanodeId().toString()) + .setKeyCount(r.getKeyCount()) + .setSequenceID(r.getSequenceId()).build() + ); + } + return results; + } + @Override public List getContainerWithPipelineBatch( List containerIDs) throws IOException { @@ -340,7 +363,7 @@ public List getExistContainerWithPipelinesInBatch( * replication factor. */ private boolean hasRequiredReplicas(ContainerInfo contInfo) { - try{ + try { return getScm().getContainerManager() .getContainerReplicas(contInfo.containerID()) .size() >= contInfo.getReplicationConfig().getRequiredNodes(); @@ -435,7 +458,7 @@ public List listContainer(long startContainerID, buildAuditMessageForFailure(SCMAction.LIST_CONTAINER, auditMap, ex)); throw ex; } finally { - if(auditSuccess) { + if (auditSuccess) { AUDIT.logReadSuccess( buildAuditMessageForSuccess(SCMAction.LIST_CONTAINER, auditMap)); } @@ -460,7 +483,7 @@ public void deleteContainer(long containerID) throws IOException { ); throw ex; } finally { - if(auditSuccess) { + if (auditSuccess) { AUDIT.logWriteSuccess( buildAuditMessageForSuccess(SCMAction.DELETE_CONTAINER, auditMap) ); @@ -620,7 +643,7 @@ public void closePipeline(HddsProtos.PipelineID pipelineID) @Override public ScmInfo getScmInfo() throws IOException { boolean auditSuccess = true; - try{ + try { ScmInfo.Builder builder = new ScmInfo.Builder() .setClusterId(scm.getScmStorageConfig().getClusterID()) @@ -644,7 +667,7 @@ public ScmInfo getScmInfo() throws IOException { ); throw ex; } finally { - if(auditSuccess) { + if (auditSuccess) { AUDIT.logReadSuccess( buildAuditMessageForSuccess(SCMAction.GET_SCM_INFO, null) ); @@ -710,6 +733,15 @@ public boolean getReplicationManagerStatus() { return scm.getReplicationManager().isRunning(); } + @Override + public ReplicationManagerReport getReplicationManagerReport() + throws IOException { + getScm().checkAdminAccess(getRemoteUser()); + AUDIT.logWriteSuccess(buildAuditMessageForSuccess( + SCMAction.GET_REPLICATION_MANAGER_REPORT, null)); + return scm.getReplicationManager().getContainerReport(); + } + @Override public StatusAndMessages finalizeScmUpgrade(String upgradeClientID) throws IOException { @@ -744,8 +776,8 @@ public StatusAndMessages queryUpgradeFinalizationProgress( @Override public boolean startContainerBalancer( - Optional threshold, Optional idleiterations, - Optional maxDatanodesRatioToInvolvePerIteration, + Optional threshold, Optional iterations, + Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTarget, Optional maxSizeLeavingSource) throws IOException { @@ -754,8 +786,8 @@ public boolean startContainerBalancer( scm.getConfiguration().getObject(ContainerBalancerConfiguration.class); if (threshold.isPresent()) { double tsd = threshold.get(); - 
Preconditions.checkState(tsd >= 0.0D && tsd < 1.0D, - "threshold should to be specified in range [0.0, 1.0)."); + Preconditions.checkState(tsd >= 0.0D && tsd < 100.0D, + "threshold should be specified in range [0.0, 100.0)."); cbc.setThreshold(tsd); } if (maxSizeToMovePerIterationInGB.isPresent()) { @@ -764,22 +796,22 @@ public boolean startContainerBalancer( "maxSizeToMovePerIterationInGB must be positive."); cbc.setMaxSizeToMovePerIteration(mstm * OzoneConsts.GB); } - if (maxDatanodesRatioToInvolvePerIteration.isPresent()) { - double mdti = maxDatanodesRatioToInvolvePerIteration.get(); - Preconditions.checkState(mdti >= 0.0, - "maxDatanodesRatioToInvolvePerIteration must be " + + if (maxDatanodesPercentageToInvolvePerIteration.isPresent()) { + int mdti = maxDatanodesPercentageToInvolvePerIteration.get(); + Preconditions.checkState(mdti >= 0, + "maxDatanodesPercentageToInvolvePerIteration must be " + "greater than equal to zero."); - Preconditions.checkState(mdti <= 1, - "maxDatanodesRatioToInvolvePerIteration must be " + - "lesser than or equal to one."); - cbc.setMaxDatanodesRatioToInvolvePerIteration(mdti); + Preconditions.checkState(mdti <= 100, + "maxDatanodesPercentageToInvolvePerIteration must be " + + "lesser than or equal to 100."); + cbc.setMaxDatanodesPercentageToInvolvePerIteration(mdti); } - if (idleiterations.isPresent()) { - int idi = idleiterations.get(); - Preconditions.checkState(idi > 0 || idi == -1, - "idleiterations must be positive or" + - " -1(infinitly run container balancer)."); - cbc.setIdleIteration(idi); + if (iterations.isPresent()) { + int i = iterations.get(); + Preconditions.checkState(i > 0 || i == -1, + "number of iterations must be positive or" + + " -1 (for running container balancer infinitely)."); + cbc.setIterations(i); } if (maxSizeEnteringTarget.isPresent()) { @@ -807,7 +839,7 @@ public boolean startContainerBalancer( AUDIT.logWriteFailure(buildAuditMessageForSuccess( SCMAction.START_CONTAINER_BALANCER, null)); } - return isStartedSuccessfully; + return isStartedSuccessfully; } @Override @@ -904,7 +936,8 @@ private HddsProtos.DatanodeUsageInfoProto getUsageInfoFromDatanodeDetails( */ @Override public List getDatanodeUsageInfo( - boolean mostUsed, int count) throws IOException, IllegalArgumentException{ + boolean mostUsed, int count) + throws IOException, IllegalArgumentException { // check admin authorisation try { @@ -945,6 +978,11 @@ public Token getContainerToken(ContainerID containerID) .generateToken(remoteUser.getUserName(), containerID); } + @Override + public long getContainerCount() throws IOException { + return scm.getContainerManager().getContainers().size(); + } + /** * Queries a list of Node that match a set of statuses. 
* diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java index 248c90c3642..fcadbf7462f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java @@ -204,7 +204,7 @@ public SCMVersionResponseProto getVersion(SCMVersionRequestProto buildAuditMessageForFailure(SCMAction.GET_VERSION, null, ex)); throw ex; } finally { - if(auditSuccess) { + if (auditSuccess) { AUDIT.logReadSuccess( buildAuditMessageForSuccess(SCMAction.GET_VERSION, null)); } @@ -249,7 +249,7 @@ public SCMRegisteredResponseProto register( buildAuditMessageForFailure(SCMAction.REGISTER, auditMap, ex)); throw ex; } finally { - if(auditSuccess) { + if (auditSuccess) { AUDIT.logWriteSuccess( buildAuditMessageForSuccess(SCMAction.REGISTER, auditMap)); } @@ -284,7 +284,7 @@ public SCMHeartbeatResponseProto sendHeartbeat( ); throw ex; } finally { - if(auditSuccess) { + if (auditSuccess) { AUDIT.logWriteSuccess( buildAuditMessageForSuccess(SCMAction.SEND_HEARTBEAT, auditMap) ); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 85efdfb9ac5..2fb788d5d7c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -557,7 +557,7 @@ private void initializeSystemManagers(OzoneConfiguration conf, .build(); } - if(configurator.getScmNodeManager() != null) { + if (configurator.getScmNodeManager() != null) { scmNodeManager = configurator.getScmNodeManager(); } else { scmNodeManager = new SCMNodeManager(conf, scmStorageConfig, eventQueue, @@ -616,7 +616,7 @@ private void initializeSystemManagers(OzoneConfiguration conf, scmHAManager, getScmMetadataStore().getMoveTable()); } - if(configurator.getScmSafeModeManager() != null) { + if (configurator.getScmSafeModeManager() != null) { scmSafeModeManager = configurator.getScmSafeModeManager(); } else { scmSafeModeManager = new SCMSafeModeManager(conf, @@ -642,7 +642,7 @@ private void initializeCAnSecurityProtocol(OzoneConfiguration conf, // TODO: Support Certificate Server loading via Class Name loader. // So it is easy to use different Certificate Servers if needed. - if(this.scmMetadataStore == null) { + if (this.scmMetadataStore == null) { LOG.error("Cannot initialize Certificate Server without a valid meta " + "data layer."); throw new SCMException("Cannot initialize CA without a valid metadata " + @@ -796,7 +796,7 @@ private ContainerTokenSecretManager createContainerTokenSecretManager( private void initalizeMetadataStore(OzoneConfiguration conf, SCMConfigurator configurator) throws IOException { - if(configurator.getMetadataStore() != null) { + if (configurator.getMetadataStore() != null) { scmMetadataStore = configurator.getMetadataStore(); } else { scmMetadataStore = new SCMMetadataStoreImpl(conf); @@ -977,7 +977,7 @@ public static boolean scmBootstrap(OzoneConfiguration conf) // will be persisted into the version file once this node gets added // to existing SCM ring post node regular start up. 
- if(OzoneSecurityUtil.isSecurityEnabled(conf)) { + if (OzoneSecurityUtil.isSecurityEnabled(conf)) { HASecurityUtils.initializeSecurity(scmStorageConfig, config, getScmAddress(scmhaNodeDetails, conf), false); } @@ -1830,7 +1830,7 @@ public String getSCMNodeId() { } public StatusAndMessages finalizeUpgrade(String upgradeClientID) - throws IOException{ + throws IOException { return upgradeFinalizer.finalize(upgradeClientID, this); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java index f7a07616ac6..ae955ee016f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.server.http.BaseHttpServer; import org.apache.hadoop.ozone.OzoneConsts; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; /** * HttpServer2 wrapper for the Ozone Storage Container Manager. @@ -34,7 +34,7 @@ public StorageContainerManagerHttpServer(MutableConfigurationSource conf, StorageContainerManager scm) throws IOException { super(conf, "scm"); - addServlet("dbCheckpoint", OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT, + addServlet("dbCheckpoint", OZONE_DB_CHECKPOINT_HTTP_ENDPOINT, SCMDBCheckpointServlet.class); getWebAppContext().setAttribute(OzoneConsts.SCM_CONTEXT_ATTRIBUTE, scm); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java index 1d8859fac34..030601a5fd3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java @@ -179,7 +179,7 @@ public void start(OzoneConfiguration conf) throws Exception { @Override public boolean init(OzoneConfiguration conf, String clusterId) - throws IOException{ + throws IOException { return StorageContainerManager.scmInit(conf, clusterId); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java index d4d11ffd621..feb58fc0983 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java @@ -17,36 +17,699 @@ */ package org.apache.hadoop.hdds.scm; -import java.util.ArrayList; -import java.util.List; - +import com.google.common.base.Preconditions; import org.apache.commons.lang3.RandomUtils; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.CRLStatusReport; +import org.apache.hadoop.hdds.protocol.proto + 
.StorageContainerDatanodeProtocolProtos.PipelineAction; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos; + .StorageContainerDatanodeProtocolProtos.PipelineActionsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.PipelineReport; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer - .NodeRegistrationContainerReport; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; +import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.scm.server.SCMConfigurator; +import org.apache.hadoop.hdds.scm.server + .SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode; +import org.apache.hadoop.hdds.scm.server + .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol + .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol + .proto.StorageContainerDatanodeProtocolProtos.CommandStatus; +import org.apache.hadoop.hdds.protocol + .proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.StorageReportProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; +import org.apache.hadoop.hdds.protocol.proto + .StorageContainerDatanodeProtocolProtos.StorageTypeProto; +import org.apache.hadoop.hdds.scm.node.SCMNodeManager; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer; +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.common.Storage; +import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; +import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; +import org.apache.hadoop.security.authentication.client + .AuthenticationException; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; /** * Stateless helper functions for Hdds tests. 
*/ public final class HddsTestUtils { + private static ThreadLocalRandom random = ThreadLocalRandom.current(); + private static PipelineID randomPipelineID = PipelineID.randomId(); + private HddsTestUtils() { } + /** + * Generates DatanodeDetails from RegisteredCommand. + * + * @param registeredCommand registration response from SCM + * + * @return DatanodeDetails + */ + public static DatanodeDetails getDatanodeDetails( + RegisteredCommand registeredCommand) { + return MockDatanodeDetails.createDatanodeDetails( + registeredCommand.getDatanode().getUuidString(), + registeredCommand.getDatanode().getHostName(), + registeredCommand.getDatanode().getIpAddress(), + null); + } + + /** + * Creates a random DatanodeDetails and register it with the given + * NodeManager. + * + * @param nodeManager NodeManager + * + * @return DatanodeDetails + */ + public static DatanodeDetails createRandomDatanodeAndRegister( + SCMNodeManager nodeManager) { + return getDatanodeDetails( + nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), null, + getRandomPipelineReports())); + } + + /** + * Get specified number of DatanodeDetails and register them with node + * manager. + * + * @param nodeManager node manager to register the datanode ids. + * @param count number of DatanodeDetails needed. + * + * @return list of DatanodeDetails + */ + public static List getListOfRegisteredDatanodeDetails( + SCMNodeManager nodeManager, int count) { + ArrayList datanodes = new ArrayList<>(); + for (int i = 0; i < count; i++) { + datanodes.add(createRandomDatanodeAndRegister(nodeManager)); + } + return datanodes; + } + + /** + * Generates a random NodeReport. + * + * @return NodeReportProto + */ + public static NodeReportProto getRandomNodeReport() { + return getRandomNodeReport(1, 1); + } + + /** + * Generates random NodeReport with the given number of storage report in it. + * + * @param numberOfStorageReport number of storage report this node report + * should have + * @param numberOfMetadataStorageReport number of metadata storage report + * this node report should have + * @return NodeReportProto + */ + public static NodeReportProto getRandomNodeReport(int numberOfStorageReport, + int numberOfMetadataStorageReport) { + UUID nodeId = UUID.randomUUID(); + return getRandomNodeReport(nodeId, File.separator + nodeId, + numberOfStorageReport, numberOfMetadataStorageReport); + } + + /** + * Generates random NodeReport for the given nodeId with the given + * base path and number of storage report in it. + * + * @param nodeId datanode id + * @param basePath base path of storage directory + * @param numberOfStorageReport number of storage report + * @param numberOfMetadataStorageReport number of metadata storage report + * + * @return NodeReportProto + */ + public static NodeReportProto getRandomNodeReport(UUID nodeId, + String basePath, int numberOfStorageReport, + int numberOfMetadataStorageReport) { + List storageReports = new ArrayList<>(); + for (int i = 0; i < numberOfStorageReport; i++) { + storageReports.add(getRandomStorageReport(nodeId, + basePath + File.separator + "data-" + i)); + } + List metadataStorageReports = + new ArrayList<>(); + for (int i = 0; i < numberOfMetadataStorageReport; i++) { + metadataStorageReports.add(getRandomMetadataStorageReport( + basePath + File.separator + "metadata-" + i)); + } + return createNodeReport(storageReports, metadataStorageReports); + } + + /** + * Creates NodeReport with the given storage reports. + * + * @param reports storage reports to be included in the node report. 
+ * @param metaReports metadata storage reports to be included + * in the node report. + * @return NodeReportProto + */ + public static NodeReportProto createNodeReport( + List reports, + List metaReports) { + NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder(); + nodeReport.addAllStorageReport(reports); + nodeReport.addAllMetadataStorageReport(metaReports); + return nodeReport.build(); + } + + /** + * Generates random storage report. + * + * @param nodeId datanode id for which the storage report belongs to + * @param path path of the storage + * + * @return StorageReportProto + */ + public static StorageReportProto getRandomStorageReport(UUID nodeId, + String path) { + return createStorageReport(nodeId, path, + random.nextInt(1000), + random.nextInt(500), + random.nextInt(500), + StorageTypeProto.DISK); + } + + /** + * Generates random metadata storage report. + * + * @param path path of the storage + * + * @return MetadataStorageReportProto + */ + public static MetadataStorageReportProto getRandomMetadataStorageReport( + String path) { + return createMetadataStorageReport(path, + random.nextInt(1000), + random.nextInt(500), + random.nextInt(500), + StorageTypeProto.DISK); + } + + public static StorageReportProto createStorageReport(UUID nodeId, String path, + long capacity) { + return createStorageReport(nodeId, path, + capacity, + 0, + capacity, + StorageTypeProto.DISK); + } + + public static StorageReportProto createStorageReport(UUID nodeId, String path, + long capacity, long used, long remaining, StorageTypeProto type) { + return createStorageReport(nodeId, path, capacity, used, remaining, + type, false); + } + /** + * Creates storage report with the given information. + * + * @param nodeId datanode id + * @param path storage dir + * @param capacity storage size + * @param used space used + * @param remaining space remaining + * @param type type of storage + * + * @return StorageReportProto + */ + public static StorageReportProto createStorageReport(UUID nodeId, String path, + long capacity, long used, long remaining, StorageTypeProto type, + boolean failed) { + Preconditions.checkNotNull(nodeId); + Preconditions.checkNotNull(path); + StorageReportProto.Builder srb = StorageReportProto.newBuilder(); + srb.setStorageUuid(nodeId.toString()) + .setStorageLocation(path) + .setCapacity(capacity) + .setScmUsed(used) + .setFailed(failed) + .setRemaining(remaining); + StorageTypeProto storageTypeProto = + type == null ? StorageTypeProto.DISK : type; + srb.setStorageType(storageTypeProto); + return srb.build(); + } + + public static MetadataStorageReportProto createMetadataStorageReport( + String path, long capacity) { + return createMetadataStorageReport(path, + capacity, + 0, + capacity, + StorageTypeProto.DISK, false); + } + + public static MetadataStorageReportProto createMetadataStorageReport( + String path, long capacity, long used, long remaining, + StorageTypeProto type) { + return createMetadataStorageReport(path, capacity, used, remaining, + type, false); + } + + /** + * Creates metadata storage report with the given information. 
+ * + * @param path storage dir + * @param capacity storage size + * @param used space used + * @param remaining space remaining + * @param type type of storage + * + * @return StorageReportProto + */ + public static MetadataStorageReportProto createMetadataStorageReport( + String path, long capacity, long used, long remaining, + StorageTypeProto type, boolean failed) { + Preconditions.checkNotNull(path); + MetadataStorageReportProto.Builder srb = MetadataStorageReportProto + .newBuilder(); + srb.setStorageLocation(path) + .setCapacity(capacity) + .setScmUsed(used) + .setFailed(failed) + .setRemaining(remaining); + StorageTypeProto storageTypeProto = + type == null ? StorageTypeProto.DISK : type; + srb.setStorageType(storageTypeProto); + return srb.build(); + } + + /** + * Generates random container reports. + * + * @return ContainerReportsProto + */ + public static ContainerReportsProto getRandomContainerReports() { + return getRandomContainerReports(1); + } + + /** + * Generates random container report with the given number of containers. + * + * @param numberOfContainers number of containers to be in container report + * + * @return ContainerReportsProto + */ + public static ContainerReportsProto getRandomContainerReports( + int numberOfContainers) { + List containerInfos = new ArrayList<>(); + for (int i = 0; i < numberOfContainers; i++) { + containerInfos.add(getRandomContainerInfo(i)); + } + return getContainerReports(containerInfos); + } + + + public static PipelineReportsProto getRandomPipelineReports() { + return PipelineReportsProto.newBuilder().build(); + } + + public static PipelineReportFromDatanode getPipelineReportFromDatanode( + DatanodeDetails dn, PipelineID... pipelineIDs) { + PipelineReportsProto.Builder reportBuilder = + PipelineReportsProto.newBuilder(); + for (PipelineID pipelineID : pipelineIDs) { + reportBuilder.addPipelineReport( + PipelineReport.newBuilder() + .setPipelineID(pipelineID.getProtobuf()) + .setIsLeader(false)); + } + return new PipelineReportFromDatanode(dn, reportBuilder.build()); + } + + public static PipelineReportFromDatanode getPipelineReportFromDatanode( + DatanodeDetails dn, PipelineID pipelineID, boolean isLeader) { + PipelineReportsProto.Builder reportBuilder = + PipelineReportsProto.newBuilder(); + reportBuilder.addPipelineReport(PipelineReport.newBuilder() + .setPipelineID(pipelineID.getProtobuf()).setIsLeader(isLeader)); + return new PipelineReportFromDatanode(dn, reportBuilder.build()); + } + + public static void openAllRatisPipelines(PipelineManager pipelineManager) + throws IOException { + // Pipeline is created by background thread + for (ReplicationFactor factor : ReplicationFactor.values()) { + // Trigger the processed pipeline report event + for (Pipeline pipeline : pipelineManager + .getPipelines(new RatisReplicationConfig(factor))) { + pipelineManager.openPipeline(pipeline.getId()); + } + } + } + + public static PipelineActionsFromDatanode getPipelineActionFromDatanode( + DatanodeDetails dn, PipelineID... 
pipelineIDs) { + PipelineActionsProto.Builder actionsProtoBuilder = + PipelineActionsProto.newBuilder(); + for (PipelineID pipelineID : pipelineIDs) { + ClosePipelineInfo closePipelineInfo = + ClosePipelineInfo.newBuilder().setPipelineID(pipelineID.getProtobuf()) + .setReason(ClosePipelineInfo.Reason.PIPELINE_FAILED) + .setDetailedReason("").build(); + actionsProtoBuilder.addPipelineActions(PipelineAction.newBuilder() + .setClosePipeline(closePipelineInfo) + .setAction(PipelineAction.Action.CLOSE) + .build()); + } + return new PipelineActionsFromDatanode(dn, actionsProtoBuilder.build()); + } + + /** + * Creates container report with the given ContainerInfo(s). + * + * @param containerInfos one or more ContainerInfo + * + * @return ContainerReportsProto + */ + public static ContainerReportsProto getContainerReports( + ContainerReplicaProto... containerInfos) { + return getContainerReports(Arrays.asList(containerInfos)); + } + + /** + * Creates container report with the given ContainerInfo(s). + * + * @param containerInfos list of ContainerInfo + * + * @return ContainerReportsProto + */ + public static ContainerReportsProto getContainerReports( + List containerInfos) { + ContainerReportsProto.Builder + reportsBuilder = ContainerReportsProto.newBuilder(); + for (ContainerReplicaProto containerInfo : containerInfos) { + reportsBuilder.addReports(containerInfo); + } + return reportsBuilder.build(); + } + + /** + * Generates random ContainerInfo. + * + * @param containerId container id of the ContainerInfo + * + * @return ContainerInfo + */ + public static ContainerReplicaProto getRandomContainerInfo( + long containerId) { + return createContainerInfo(containerId, + OzoneConsts.GB * 5, + random.nextLong(1000), + OzoneConsts.GB * random.nextInt(5), + random.nextLong(1000), + OzoneConsts.GB * random.nextInt(2), + random.nextLong(1000), + OzoneConsts.GB * random.nextInt(5)); + } + + /** + * Creates ContainerInfo with the given details. + * + * @param containerId id of the container + * @param size size of container + * @param keyCount number of keys + * @param bytesUsed bytes used by the container + * @param readCount number of reads + * @param readBytes bytes read + * @param writeCount number of writes + * @param writeBytes bytes written + * + * @return ContainerInfo + */ + @SuppressWarnings("parameternumber") + public static ContainerReplicaProto createContainerInfo( + long containerId, long size, long keyCount, long bytesUsed, + long readCount, long readBytes, long writeCount, long writeBytes) { + return ContainerReplicaProto.newBuilder() + .setContainerID(containerId) + .setState(ContainerReplicaProto.State.OPEN) + .setSize(size) + .setKeyCount(keyCount) + .setUsed(bytesUsed) + .setReadCount(readCount) + .setReadBytes(readBytes) + .setWriteCount(writeCount) + .setWriteBytes(writeBytes) + .build(); + } + + /** + * Create Command Status report object. + * @return CommandStatusReportsProto + */ + public static CommandStatusReportsProto createCommandStatusReport( + List reports) { + CommandStatusReportsProto.Builder report = CommandStatusReportsProto + .newBuilder(); + report.addAllCmdStatus(reports); + return report.build(); + } + + /** + * Create CRL Status report object. + * @param pendingCRLIds List of Pending CRL Ids in the report. + * @param receivedCRLId Latest received CRL Id in the report. 
+ * @return {@link CRLStatusReport} + */ + public static CRLStatusReport createCRLStatusReport( + List pendingCRLIds, long receivedCRLId) { + CRLStatusReport.Builder report = CRLStatusReport.newBuilder(); + report.addAllPendingCrlIds(pendingCRLIds); + report.setReceivedCrlId(receivedCRLId); + return report.build(); + } + + public static org.apache.hadoop.hdds.scm.container.ContainerInfo + allocateContainer(ContainerManager containerManager) + throws IOException { + return containerManager + .allocateContainer(new RatisReplicationConfig(ReplicationFactor.THREE), + "root"); + + } + + public static void closeContainer(ContainerManager containerManager, + ContainerID id) throws IOException, InvalidStateTransitionException { + containerManager.updateContainerState( + id, HddsProtos.LifeCycleEvent.FINALIZE); + containerManager.updateContainerState( + id, HddsProtos.LifeCycleEvent.CLOSE); + + } + + /** + * Move the container to quasi-closed state. + * @param containerManager + * @param id + * @throws IOException + */ + public static void quasiCloseContainer(ContainerManager containerManager, + ContainerID id) throws IOException, InvalidStateTransitionException { + containerManager.updateContainerState( + id, HddsProtos.LifeCycleEvent.FINALIZE); + containerManager.updateContainerState( + id, HddsProtos.LifeCycleEvent.QUASI_CLOSE); + + } + + /** + * Constructs and returns StorageContainerManager instance using the given + * configuration. + * + * @param conf OzoneConfiguration + * @return StorageContainerManager instance + * @throws IOException + * @throws AuthenticationException + */ + public static StorageContainerManager getScmSimple(OzoneConfiguration conf) + throws IOException, AuthenticationException { + SCMConfigurator configurator = new SCMConfigurator(); + // The default behaviour whether ratis will be enabled or not + // in SCM will be inferred from ozone-default.xml. + // conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); + return StorageContainerManager.createSCM(conf, configurator); + } + + /** + * Constructs and returns StorageContainerManager instance using the given + * configuration. The ports used by this StorageContainerManager are + * randomly selected from free ports available. + * + * @param conf OzoneConfiguration + * @return StorageContainerManager instance + * @throws IOException + * @throws AuthenticationException + */ + public static StorageContainerManager getScm(OzoneConfiguration conf) + throws IOException, AuthenticationException { + SCMConfigurator configurator = new SCMConfigurator(); + configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); + configurator.setScmContext(SCMContext.emptyContext()); + return getScm(conf, configurator); + } + + /** + * Constructs and returns StorageContainerManager instance using the given + * configuration and the configurator. The ports used by this + * StorageContainerManager are randomly selected from free ports available.
+ * + * @param conf OzoneConfiguration + * @param configurator SCMConfigurator + * @return StorageContainerManager instance + * @throws IOException + * @throws AuthenticationException + */ + public static StorageContainerManager getScm(OzoneConfiguration conf, + SCMConfigurator configurator) + throws IOException, AuthenticationException { + conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + SCMStorageConfig scmStore = new SCMStorageConfig(conf); + if (scmStore.getState() != Storage.StorageState.INITIALIZED) { + String clusterId = UUID.randomUUID().toString(); + String scmId = UUID.randomUUID().toString(); + scmStore.setClusterId(clusterId); + scmStore.setScmId(scmId); + // writes the version file properties + scmStore.initialize(); + } + return StorageContainerManager.createSCM(conf, configurator); + } + + private static ContainerInfo.Builder getDefaultContainerInfoBuilder( + final HddsProtos.LifeCycleState state) { + return new ContainerInfo.Builder() + .setContainerID(RandomUtils.nextLong()) + .setReplicationConfig( + new RatisReplicationConfig(ReplicationFactor.THREE)) + .setState(state) + .setSequenceId(10000L) + .setOwner("TEST"); + } + + public static ContainerInfo getContainer( + final HddsProtos.LifeCycleState state) { + return getDefaultContainerInfoBuilder(state) + .setPipelineID(randomPipelineID) + .build(); + } + + public static ContainerInfo getContainer( + final HddsProtos.LifeCycleState state, PipelineID pipelineID) { + return getDefaultContainerInfoBuilder(state) + .setPipelineID(pipelineID) + .build(); + } + + public static Set getReplicas( + final ContainerID containerId, + final ContainerReplicaProto.State state, + final DatanodeDetails... datanodeDetails) { + return getReplicas(containerId, state, 10000L, datanodeDetails); + } + + public static Set getReplicas( + final ContainerID containerId, + final ContainerReplicaProto.State state, + final long sequenceId, + final DatanodeDetails... datanodeDetails) { + Set replicas = new HashSet<>(); + for (DatanodeDetails datanode : datanodeDetails) { + replicas.add(getReplicas(containerId, state, + sequenceId, datanode.getUuid(), datanode)); + } + return replicas; + } + + public static ContainerReplica getReplicas( + final ContainerID containerId, + final ContainerReplicaProto.State state, + final long sequenceId, + final UUID originNodeId, + final DatanodeDetails datanodeDetails) { + return ContainerReplica.newBuilder() + .setContainerID(containerId) + .setContainerState(state) + .setDatanodeDetails(datanodeDetails) + .setOriginNodeId(originNodeId) + .setSequenceId(sequenceId) + .setBytesUsed(100) + .build(); + } + + public static Pipeline getRandomPipeline() { + List nodes = new ArrayList<>(); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + nodes.add(MockDatanodeDetails.randomDatanodeDetails()); + return Pipeline.newBuilder() + .setReplicationConfig( + new RatisReplicationConfig(ReplicationFactor.THREE)) + .setId(PipelineID.randomId()) + .setNodes(nodes) + .setState(Pipeline.PipelineState.OPEN) + .build(); + } + /** * Create Command Status report object. * * @param numOfContainers number of containers to be included in report. 
* @return CommandStatusReportsProto */ - public static NodeRegistrationContainerReport + public static SCMDatanodeProtocolServer.NodeRegistrationContainerReport createNodeRegistrationContainerReport(int numOfContainers) { - return new NodeRegistrationContainerReport( + return new SCMDatanodeProtocolServer.NodeRegistrationContainerReport( MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.getRandomContainerReports(numOfContainers)); + getRandomContainerReports(numOfContainers)); } /** @@ -55,16 +718,16 @@ private HddsTestUtils() { * @param dnContainers List of containers to be included in report * @return NodeRegistrationContainerReport */ - public static NodeRegistrationContainerReport + public static SCMDatanodeProtocolServer.NodeRegistrationContainerReport createNodeRegistrationContainerReport(List dnContainers) { - List + List containers = new ArrayList<>(); dnContainers.forEach(c -> { - containers.add(TestUtils.getRandomContainerInfo(c.getContainerID())); + containers.add(getRandomContainerInfo(c.getContainerID())); }); - return new NodeRegistrationContainerReport( + return new SCMDatanodeProtocolServer.NodeRegistrationContainerReport( MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.getContainerReports(containers)); + getContainerReports(containers)); } /** @@ -83,5 +746,4 @@ public static List getContainerInfo(int numContainers) { } return containerInfoList; } - } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java index 20c04685673..0c9222d0619 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java @@ -43,7 +43,7 @@ public class TestHddsServerUtil { public Timeout timeout = Timeout.seconds(300); @Rule - public ExpectedException thrown= ExpectedException.none(); + public ExpectedException thrown = ExpectedException.none(); /** * Verify that the datanode endpoint is parsed correctly. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java index f7386500fd1..8394c5a6b7b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java @@ -56,7 +56,7 @@ public class TestHddsServerUtils { public Timeout timeout = Timeout.seconds(300);; @Rule - public ExpectedException thrown= ExpectedException.none(); + public ExpectedException thrown = ExpectedException.none(); /** * Test getting OZONE_SCM_DATANODE_ADDRESS_KEY with port. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java deleted file mode 100644 index 068f3d774c2..00000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ /dev/null @@ -1,699 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CRLStatusReport; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineActionsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReport; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerManager; -import org.apache.hadoop.hdds.scm.container.ContainerReplica; -import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; -import org.apache.hadoop.hdds.scm.ha.SCMContext; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.server.SCMConfigurator; -import org.apache.hadoop.hdds.scm.server - .SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode; -import org.apache.hadoop.hdds.scm.server - .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; -import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.CommandStatus; -import org.apache.hadoop.hdds.protocol - .proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.StorageTypeProto; -import org.apache.hadoop.hdds.scm.node.SCMNodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; -import 
org.apache.hadoop.ozone.protocol.commands.RegisteredCommand; -import org.apache.hadoop.security.authentication.client - .AuthenticationException; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ThreadLocalRandom; - -/** - * Stateless helper functions to handler scm/datanode connection. - */ -public final class TestUtils { - - private static ThreadLocalRandom random = ThreadLocalRandom.current(); - private static PipelineID randomPipelineID = PipelineID.randomId(); - - private TestUtils() { - } - - /** - * Generates DatanodeDetails from RegisteredCommand. - * - * @param registeredCommand registration response from SCM - * - * @return DatanodeDetails - */ - public static DatanodeDetails getDatanodeDetails( - RegisteredCommand registeredCommand) { - return MockDatanodeDetails.createDatanodeDetails( - registeredCommand.getDatanode().getUuidString(), - registeredCommand.getDatanode().getHostName(), - registeredCommand.getDatanode().getIpAddress(), - null); - } - - /** - * Creates a random DatanodeDetails and register it with the given - * NodeManager. - * - * @param nodeManager NodeManager - * - * @return DatanodeDetails - */ - public static DatanodeDetails createRandomDatanodeAndRegister( - SCMNodeManager nodeManager) { - return getDatanodeDetails( - nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), null, - getRandomPipelineReports())); - } - - /** - * Get specified number of DatanodeDetails and register them with node - * manager. - * - * @param nodeManager node manager to register the datanode ids. - * @param count number of DatanodeDetails needed. - * - * @return list of DatanodeDetails - */ - public static List getListOfRegisteredDatanodeDetails( - SCMNodeManager nodeManager, int count) { - ArrayList datanodes = new ArrayList<>(); - for (int i = 0; i < count; i++) { - datanodes.add(createRandomDatanodeAndRegister(nodeManager)); - } - return datanodes; - } - - /** - * Generates a random NodeReport. - * - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport() { - return getRandomNodeReport(1, 1); - } - - /** - * Generates random NodeReport with the given number of storage report in it. - * - * @param numberOfStorageReport number of storage report this node report - * should have - * @param numberOfMetadataStorageReport number of metadata storage report - * this node report should have - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport(int numberOfStorageReport, - int numberOfMetadataStorageReport) { - UUID nodeId = UUID.randomUUID(); - return getRandomNodeReport(nodeId, File.separator + nodeId, - numberOfStorageReport, numberOfMetadataStorageReport); - } - - /** - * Generates random NodeReport for the given nodeId with the given - * base path and number of storage report in it. 
- * - * @param nodeId datanode id - * @param basePath base path of storage directory - * @param numberOfStorageReport number of storage report - * @param numberOfMetadataStorageReport number of metadata storage report - * - * @return NodeReportProto - */ - public static NodeReportProto getRandomNodeReport(UUID nodeId, - String basePath, int numberOfStorageReport, - int numberOfMetadataStorageReport) { - List storageReports = new ArrayList<>(); - for (int i = 0; i < numberOfStorageReport; i++) { - storageReports.add(getRandomStorageReport(nodeId, - basePath + File.separator + "data-" + i)); - } - List metadataStorageReports = - new ArrayList<>(); - for (int i = 0; i < numberOfMetadataStorageReport; i++) { - metadataStorageReports.add(getRandomMetadataStorageReport( - basePath + File.separator + "metadata-" + i)); - } - return createNodeReport(storageReports, metadataStorageReports); - } - - /** - * Creates NodeReport with the given storage reports. - * - * @param reports storage reports to be included in the node report. - * @param metaReports metadata storage reports to be included - * in the node report. - * @return NodeReportProto - */ - public static NodeReportProto createNodeReport( - List reports, - List metaReports) { - NodeReportProto.Builder nodeReport = NodeReportProto.newBuilder(); - nodeReport.addAllStorageReport(reports); - nodeReport.addAllMetadataStorageReport(metaReports); - return nodeReport.build(); - } - - /** - * Generates random storage report. - * - * @param nodeId datanode id for which the storage report belongs to - * @param path path of the storage - * - * @return StorageReportProto - */ - public static StorageReportProto getRandomStorageReport(UUID nodeId, - String path) { - return createStorageReport(nodeId, path, - random.nextInt(1000), - random.nextInt(500), - random.nextInt(500), - StorageTypeProto.DISK); - } - - /** - * Generates random metadata storage report. - * - * @param path path of the storage - * - * @return MetadataStorageReportProto - */ - public static MetadataStorageReportProto getRandomMetadataStorageReport( - String path) { - return createMetadataStorageReport(path, - random.nextInt(1000), - random.nextInt(500), - random.nextInt(500), - StorageTypeProto.DISK); - } - - public static StorageReportProto createStorageReport(UUID nodeId, String path, - long capacity) { - return createStorageReport(nodeId, path, - capacity, - 0, - capacity, - StorageTypeProto.DISK); - } - - public static StorageReportProto createStorageReport(UUID nodeId, String path, - long capacity, long used, long remaining, StorageTypeProto type) { - return createStorageReport(nodeId, path, capacity, used, remaining, - type, false); - } - /** - * Creates storage report with the given information. - * - * @param nodeId datanode id - * @param path storage dir - * @param capacity storage size - * @param used space used - * @param remaining space remaining - * @param type type of storage - * - * @return StorageReportProto - */ - public static StorageReportProto createStorageReport(UUID nodeId, String path, - long capacity, long used, long remaining, StorageTypeProto type, - boolean failed) { - Preconditions.checkNotNull(nodeId); - Preconditions.checkNotNull(path); - StorageReportProto.Builder srb = StorageReportProto.newBuilder(); - srb.setStorageUuid(nodeId.toString()) - .setStorageLocation(path) - .setCapacity(capacity) - .setScmUsed(used) - .setFailed(failed) - .setRemaining(remaining); - StorageTypeProto storageTypeProto = - type == null ? 
StorageTypeProto.DISK : type; - srb.setStorageType(storageTypeProto); - return srb.build(); - } - - public static MetadataStorageReportProto createMetadataStorageReport( - String path, long capacity) { - return createMetadataStorageReport(path, - capacity, - 0, - capacity, - StorageTypeProto.DISK, false); - } - - public static MetadataStorageReportProto createMetadataStorageReport( - String path, long capacity, long used, long remaining, - StorageTypeProto type) { - return createMetadataStorageReport(path, capacity, used, remaining, - type, false); - } - - /** - * Creates metadata storage report with the given information. - * - * @param path storage dir - * @param capacity storage size - * @param used space used - * @param remaining space remaining - * @param type type of storage - * - * @return StorageReportProto - */ - public static MetadataStorageReportProto createMetadataStorageReport( - String path, long capacity, long used, long remaining, - StorageTypeProto type, boolean failed) { - Preconditions.checkNotNull(path); - MetadataStorageReportProto.Builder srb = MetadataStorageReportProto - .newBuilder(); - srb.setStorageLocation(path) - .setCapacity(capacity) - .setScmUsed(used) - .setFailed(failed) - .setRemaining(remaining); - StorageTypeProto storageTypeProto = - type == null ? StorageTypeProto.DISK : type; - srb.setStorageType(storageTypeProto); - return srb.build(); - } - - /** - * Generates random container reports. - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getRandomContainerReports() { - return getRandomContainerReports(1); - } - - /** - * Generates random container report with the given number of containers. - * - * @param numberOfContainers number of containers to be in container report - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getRandomContainerReports( - int numberOfContainers) { - List containerInfos = new ArrayList<>(); - for (int i = 0; i < numberOfContainers; i++) { - containerInfos.add(getRandomContainerInfo(i)); - } - return getContainerReports(containerInfos); - } - - - public static PipelineReportsProto getRandomPipelineReports() { - return PipelineReportsProto.newBuilder().build(); - } - - public static PipelineReportFromDatanode getPipelineReportFromDatanode( - DatanodeDetails dn, PipelineID... 
pipelineIDs) { - PipelineReportsProto.Builder reportBuilder = - PipelineReportsProto.newBuilder(); - for (PipelineID pipelineID : pipelineIDs) { - reportBuilder.addPipelineReport( - PipelineReport.newBuilder() - .setPipelineID(pipelineID.getProtobuf()) - .setIsLeader(false)); - } - return new PipelineReportFromDatanode(dn, reportBuilder.build()); - } - - public static PipelineReportFromDatanode getPipelineReportFromDatanode( - DatanodeDetails dn, PipelineID pipelineID, boolean isLeader) { - PipelineReportsProto.Builder reportBuilder = - PipelineReportsProto.newBuilder(); - reportBuilder.addPipelineReport(PipelineReport.newBuilder() - .setPipelineID(pipelineID.getProtobuf()).setIsLeader(isLeader)); - return new PipelineReportFromDatanode(dn, reportBuilder.build()); - } - - public static void openAllRatisPipelines(PipelineManager pipelineManager) - throws IOException { - // Pipeline is created by background thread - for (ReplicationFactor factor : ReplicationFactor.values()) { - // Trigger the processed pipeline report event - for (Pipeline pipeline : pipelineManager - .getPipelines(new RatisReplicationConfig(factor))) { - pipelineManager.openPipeline(pipeline.getId()); - } - } - } - - public static PipelineActionsFromDatanode getPipelineActionFromDatanode( - DatanodeDetails dn, PipelineID... pipelineIDs) { - PipelineActionsProto.Builder actionsProtoBuilder = - PipelineActionsProto.newBuilder(); - for (PipelineID pipelineID : pipelineIDs) { - ClosePipelineInfo closePipelineInfo = - ClosePipelineInfo.newBuilder().setPipelineID(pipelineID.getProtobuf()) - .setReason(ClosePipelineInfo.Reason.PIPELINE_FAILED) - .setDetailedReason("").build(); - actionsProtoBuilder.addPipelineActions(PipelineAction.newBuilder() - .setClosePipeline(closePipelineInfo) - .setAction(PipelineAction.Action.CLOSE) - .build()); - } - return new PipelineActionsFromDatanode(dn, actionsProtoBuilder.build()); - } - - /** - * Creates container report with the given ContainerInfo(s). - * - * @param containerInfos one or more ContainerInfo - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getContainerReports( - ContainerReplicaProto... containerInfos) { - return getContainerReports(Arrays.asList(containerInfos)); - } - - /** - * Creates container report with the given ContainerInfo(s). - * - * @param containerInfos list of ContainerInfo - * - * @return ContainerReportsProto - */ - public static ContainerReportsProto getContainerReports( - List containerInfos) { - ContainerReportsProto.Builder - reportsBuilder = ContainerReportsProto.newBuilder(); - for (ContainerReplicaProto containerInfo : containerInfos) { - reportsBuilder.addReports(containerInfo); - } - return reportsBuilder.build(); - } - - /** - * Generates random ContainerInfo. - * - * @param containerId container id of the ContainerInfo - * - * @return ContainerInfo - */ - public static ContainerReplicaProto getRandomContainerInfo( - long containerId) { - return createContainerInfo(containerId, - OzoneConsts.GB * 5, - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(5), - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(2), - random.nextLong(1000), - OzoneConsts.GB * random.nextInt(5)); - } - - /** - * Creates ContainerInfo with the given details. 
- * - * @param containerId id of the container - * @param size size of container - * @param keyCount number of keys - * @param bytesUsed bytes used by the container - * @param readCount number of reads - * @param readBytes bytes read - * @param writeCount number of writes - * @param writeBytes bytes written - * - * @return ContainerInfo - */ - @SuppressWarnings("parameternumber") - public static ContainerReplicaProto createContainerInfo( - long containerId, long size, long keyCount, long bytesUsed, - long readCount, long readBytes, long writeCount, long writeBytes) { - return ContainerReplicaProto.newBuilder() - .setContainerID(containerId) - .setState(ContainerReplicaProto.State.OPEN) - .setSize(size) - .setKeyCount(keyCount) - .setUsed(bytesUsed) - .setReadCount(readCount) - .setReadBytes(readBytes) - .setWriteCount(writeCount) - .setWriteBytes(writeBytes) - .build(); - } - - /** - * Create Command Status report object. - * @return CommandStatusReportsProto - */ - public static CommandStatusReportsProto createCommandStatusReport( - List reports) { - CommandStatusReportsProto.Builder report = CommandStatusReportsProto - .newBuilder(); - report.addAllCmdStatus(reports); - return report.build(); - } - - /** - * Create CRL Status report object. - * @param pendingCRLIds List of Pending CRL Ids in the report. - * @param receivedCRLId Latest received CRL Id in the report. - * @return {@link CRLStatusReport} - */ - public static CRLStatusReport createCRLStatusReport( - List pendingCRLIds, long receivedCRLId) { - CRLStatusReport.Builder report = CRLStatusReport.newBuilder(); - report.addAllPendingCrlIds(pendingCRLIds); - report.setReceivedCrlId(receivedCRLId); - return report.build(); - } - - public static org.apache.hadoop.hdds.scm.container.ContainerInfo - allocateContainer(ContainerManager containerManager) - throws IOException { - return containerManager - .allocateContainer(new RatisReplicationConfig(ReplicationFactor.THREE), - "root"); - - } - - public static void closeContainer(ContainerManager containerManager, - ContainerID id) throws IOException, InvalidStateTransitionException { - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.FINALIZE); - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.CLOSE); - - } - - /** - * Move the container to Quaise close state. - * @param containerManager - * @param id - * @throws IOException - */ - public static void quasiCloseContainer(ContainerManager containerManager, - ContainerID id) throws IOException, InvalidStateTransitionException { - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.FINALIZE); - containerManager.updateContainerState( - id, HddsProtos.LifeCycleEvent.QUASI_CLOSE); - - } - - /** - * Construct and returns StorageContainerManager instance using the given - * configuration. - * - * @param conf OzoneConfiguration - * @return StorageContainerManager instance - * @throws IOException - * @throws AuthenticationException - */ - public static StorageContainerManager getScmSimple(OzoneConfiguration conf) - throws IOException, AuthenticationException { - SCMConfigurator configurator = new SCMConfigurator(); - // The default behaviour whether ratis will be enabled or not - // in SCM will be inferred from ozone-default.xml. - // conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - return StorageContainerManager.createSCM(conf, configurator); - } - - /** - * Construct and returns StorageContainerManager instance using the given - * configuration. 
The ports used by this StorageContainerManager are - * randomly selected from free ports available. - * - * @param conf OzoneConfiguration - * @return StorageContainerManager instance - * @throws IOException - * @throws AuthenticationException - */ - public static StorageContainerManager getScm(OzoneConfiguration conf) - throws IOException, AuthenticationException { - SCMConfigurator configurator = new SCMConfigurator(); - configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); - configurator.setScmContext(SCMContext.emptyContext()); - return getScm(conf, configurator); - } - - /** - * Construct and returns StorageContainerManager instance using the given - * configuration and the configurator. The ports used by this - * StorageContainerManager are randomly selected from free ports available. - * - * @param conf OzoneConfiguration - * @param configurator SCMConfigurator - * @return StorageContainerManager instance - * @throws IOException - * @throws AuthenticationException - */ - public static StorageContainerManager getScm(OzoneConfiguration conf, - SCMConfigurator configurator) - throws IOException, AuthenticationException { - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - if(scmStore.getState() != Storage.StorageState.INITIALIZED) { - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - } - return StorageContainerManager.createSCM(conf, configurator); - } - - private static ContainerInfo.Builder getDefaultContainerInfoBuilder( - final HddsProtos.LifeCycleState state) { - return new ContainerInfo.Builder() - .setContainerID(RandomUtils.nextLong()) - .setReplicationConfig( - new RatisReplicationConfig(ReplicationFactor.THREE)) - .setState(state) - .setSequenceId(10000L) - .setOwner("TEST"); - } - - public static ContainerInfo getContainer( - final HddsProtos.LifeCycleState state) { - return getDefaultContainerInfoBuilder(state) - .setPipelineID(randomPipelineID) - .build(); - } - - public static ContainerInfo getContainer( - final HddsProtos.LifeCycleState state, PipelineID pipelineID) { - return getDefaultContainerInfoBuilder(state) - .setPipelineID(pipelineID) - .build(); - } - - public static Set getReplicas( - final ContainerID containerId, - final ContainerReplicaProto.State state, - final DatanodeDetails... datanodeDetails) { - return getReplicas(containerId, state, 10000L, datanodeDetails); - } - - public static Set getReplicas( - final ContainerID containerId, - final ContainerReplicaProto.State state, - final long sequenceId, - final DatanodeDetails... 
datanodeDetails) { - Set replicas = new HashSet<>(); - for (DatanodeDetails datanode : datanodeDetails) { - replicas.add(getReplicas(containerId, state, - sequenceId, datanode.getUuid(), datanode)); - } - return replicas; - } - - public static ContainerReplica getReplicas( - final ContainerID containerId, - final ContainerReplicaProto.State state, - final long sequenceId, - final UUID originNodeId, - final DatanodeDetails datanodeDetails) { - return ContainerReplica.newBuilder() - .setContainerID(containerId) - .setContainerState(state) - .setDatanodeDetails(datanodeDetails) - .setOriginNodeId(originNodeId) - .setSequenceId(sequenceId) - .setBytesUsed(100) - .build(); - } - - public static Pipeline getRandomPipeline() { - List nodes = new ArrayList<>(); - nodes.add(MockDatanodeDetails.randomDatanodeDetails()); - nodes.add(MockDatanodeDetails.randomDatanodeDetails()); - nodes.add(MockDatanodeDetails.randomDatanodeDetails()); - return Pipeline.newBuilder() - .setReplicationConfig( - new RatisReplicationConfig(ReplicationFactor.THREE)) - .setId(PipelineID.randomId()) - .setNodes(nodes) - .setState(Pipeline.PipelineState.OPEN) - .build(); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index b6f537c1732..e8bd07bdddb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator; import org.apache.hadoop.hdds.scm.ha.SCMServiceManager; @@ -103,7 +103,7 @@ public class TestBlockManager { public ExpectedException thrown = ExpectedException.none(); @Rule - public TemporaryFolder folder= new TemporaryFolder(); + public TemporaryFolder folder = new TemporaryFolder(); private SCMMetadataStore scmMetadataStore; private ReplicationConfig replicationConfig; @@ -172,7 +172,7 @@ public void emitSafeModeStatus() { configurator.setMetadataStore(scmMetadataStore); configurator.setSCMHAManager(scmHAManager); configurator.setScmContext(scmContext); - scm = TestUtils.getScm(conf, configurator); + scm = HddsTestUtils.getScm(conf, configurator); // Initialize these fields so that the tests can pass. 
mapping = scm.getContainerManager(); @@ -198,7 +198,7 @@ public void cleanup() throws Exception { @Test public void testAllocateBlock() throws Exception { pipelineManager.createPipeline(replicationConfig); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, new ExcludeList()); Assert.assertNotNull(block); @@ -212,7 +212,7 @@ public void testAllocateBlockWithExclusion() throws Exception { } } catch (IOException e) { } - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); ExcludeList excludeList = new ExcludeList(); excludeList .addPipeline(pipelineManager.getPipelines(replicationConfig) @@ -280,7 +280,7 @@ public void testBlockDistribution() throws Exception { executors.add(Executors.newSingleThreadExecutor()); } pipelineManager.createPipeline(replicationConfig); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); Map> allocatedBlockMap = new ConcurrentHashMap<>(); List> futureList = @@ -342,7 +342,7 @@ public void testBlockDistributionWithMultipleDisks() throws Exception { executors.add(Executors.newSingleThreadExecutor()); } pipelineManager.createPipeline(replicationConfig); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); Map> allocatedBlockMap = new ConcurrentHashMap<>(); List> futureList = @@ -410,7 +410,7 @@ public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { executors.add(Executors.newSingleThreadExecutor()); } pipelineManager.createPipeline(replicationConfig); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); Map> allocatedBlockMap = new ConcurrentHashMap<>(); List> futureList = @@ -452,7 +452,7 @@ public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { // the pipeline per raft log disk config is set to 1 by default int numContainers = (int)Math.ceil((double) (numContainerPerOwnerInPipeline * - numContainerPerOwnerInPipeline)/numMetaDataVolumes); + numContainerPerOwnerInPipeline) / numMetaDataVolumes); Assert.assertTrue(numContainers == pipelineManager. getNumberOfContainers(pipeline.getId())); Assert.assertTrue( @@ -497,7 +497,7 @@ public void testMultipleBlockAllocation() pipelineManager.createPipeline(replicationConfig); pipelineManager.createPipeline(replicationConfig); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); AllocatedBlock allocatedBlock = blockManager .allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, @@ -544,7 +544,7 @@ public void testMultipleBlockAllocationWithClosedContainer() / replicationConfig.getRequiredNodes(); i++) { pipelineManager.createPipeline(replicationConfig); } - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); // wait till each pipeline has the configured number of containers. 
// After this each pipeline has numContainerPerOwnerInPipeline containers diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index 297b6119bd4..f3b3ccbd3ce 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -101,7 +101,7 @@ public void setup() throws Exception { conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - scm = TestUtils.getScm(conf); + scm = HddsTestUtils.getScm(conf); containerManager = Mockito.mock(ContainerManager.class); containerTable = scm.getScmMetadataStore().getContainerTable(); scmHADBTransactionBuffer = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java index 37bb4570c5e..cfdb2c30980 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java @@ -26,7 +26,7 @@ .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher .CommandStatusReportFromDatanode; @@ -82,7 +82,7 @@ public void testCommandStatusReport() { private CommandStatusReportFromDatanode getStatusReport( List reports) { - CommandStatusReportsProto report = TestUtils.createCommandStatusReport( + CommandStatusReportsProto report = HddsTestUtils.createCommandStatusReport( reports); DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); return new SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 83a6b6243f9..9f7d9c295f8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; 
+import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.exceptions.SCMException; @@ -283,11 +283,11 @@ public List getNodes( long capacity = nodeMetricMap.get(dd).getCapacity().get(); long used = nodeMetricMap.get(dd).getScmUsed().get(); long remaining = nodeMetricMap.get(dd).getRemaining().get(); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( di.getUuid(), "/data1-" + di.getUuidString(), capacity, used, remaining, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + di.getUuidString(), capacity, used, remaining, null); di.updateStorageReports(new ArrayList<>(Arrays.asList(storage1))); @@ -532,7 +532,7 @@ public void addContainer(DatanodeDetails dd, @Override public void addDatanodeCommand(UUID dnId, SCMCommand command) { - if(commandMap.containsKey(dnId)) { + if (commandMap.containsKey(dnId)) { List commandList = commandMap.get(dnId); Preconditions.checkNotNull(commandList); commandList.add(command); @@ -601,7 +601,7 @@ public int getCommandCount(DatanodeDetails dd) { } public void clearCommandQueue(UUID dnId) { - if(commandMap.containsKey(dnId)) { + if (commandMap.containsKey(dnId)) { commandMap.put(dnId, new LinkedList<>()); } } @@ -799,7 +799,7 @@ public List getNodesByAddress(String address) { if (uuids == null) { return results; } - for(String uuid : uuids) { + for (String uuid : uuids) { DatanodeDetails dn = getNodeByUuid(uuid); if (dn != null) { results.add(dn); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java index c015f185260..3d8551d8fce 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java @@ -82,7 +82,7 @@ public void setNodeStatus(DatanodeDetails dd, NodeStatus status) { */ public void setPipelines(DatanodeDetails dd, int count) { Set pipelines = new HashSet<>(); - for (int i=0; i t1 = executor.submit(() -> fullReportHandler.onMessage(fcr, publisher)); Future t2 = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java index ea42292e973..f6c47d32049 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java @@ -91,8 +91,8 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE; import static org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED; -import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; -import static org.apache.hadoop.hdds.scm.TestUtils.getReplicas; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getReplicas; import static 
org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.mockito.Mockito.when; @@ -263,8 +263,9 @@ public void testOpenContainer() throws IOException { containerStateManager.addContainer(container.getProtobuf()); replicationManager.processAll(); eventQueue.processAll(1000); + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.OPEN)); Assert.assertEquals(0, datanodeCommandHandler.getInvocation()); - } /** @@ -308,6 +309,8 @@ public void testClosingContainer() throws IOException { eventQueue.processAll(1000); Assert.assertEquals(currentCloseCommandCount + 6, datanodeCommandHandler .getInvocationCount(SCMCommandProto.Type.closeContainerCommand)); + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.CLOSING)); } @@ -348,6 +351,8 @@ public void testQuasiClosedContainerWithTwoOpenReplica() throws IOException { Assert.assertTrue(datanodeCommandHandler.received( SCMCommandProto.Type.closeContainerCommand, replicaThree.getDatanodeDetails())); + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); } /** @@ -378,6 +383,10 @@ public void testHealthyQuasiClosedContainer() throws IOException { replicationManager.processAll(); eventQueue.processAll(1000); Assert.assertEquals(0, datanodeCommandHandler.getInvocation()); + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); } /** @@ -459,6 +468,14 @@ public void testQuasiClosedContainerWithUnhealthyReplica() Assert.assertEquals(1, replicationManager.getMetrics() .getInflightReplication()); + // We should have one under replicated and one quasi_closed_stuck + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.UNDER_REPLICATED)); + // Now we add the missing replica back DatanodeDetails targetDn = replicationManager.getInflightReplication() .get(id).get(0).getDatanode(); @@ -482,6 +499,13 @@ public void testQuasiClosedContainerWithUnhealthyReplica() replicationManager.getMetrics().getNumReplicationCmdsCompleted()); Assert.assertEquals(currentBytesCompleted + 100L, replicationManager.getMetrics().getNumReplicationBytesCompleted()); + + report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); + Assert.assertEquals(0, report.getStat( + ReplicationManagerReport.HealthState.UNDER_REPLICATED)); } /** @@ -523,6 +547,13 @@ public void testOverReplicatedQuasiClosedContainer() throws IOException { Assert.assertEquals(1, replicationManager.getMetrics() .getInflightDeletion()); + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); + Assert.assertEquals(1, report.getStat( 
+ ReplicationManagerReport.HealthState.OVER_REPLICATED)); + // Now we remove the replica according to inflight DatanodeDetails targetDn = replicationManager.getInflightDeletion() .get(id).get(0).getDatanode(); @@ -554,6 +585,13 @@ public void testOverReplicatedQuasiClosedContainer() throws IOException { replicationManager.getMetrics().getNumDeletionCmdsCompleted()); Assert.assertEquals(deleteBytesCompleted + 101, replicationManager.getMetrics().getNumDeletionBytesCompleted()); + + report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); + Assert.assertEquals(0, report.getStat( + ReplicationManagerReport.HealthState.OVER_REPLICATED)); } /** @@ -600,6 +638,13 @@ public void testOverReplicatedQuasiClosedContainerWithUnhealthyReplica() Assert.assertEquals(1, replicationManager.getMetrics() .getInflightDeletion()); + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.OVER_REPLICATED)); + final long currentDeleteCommandCompleted = replicationManager.getMetrics() .getNumDeletionCmdsCompleted(); // Now we remove the replica to simulate deletion complete @@ -613,6 +658,13 @@ public void testOverReplicatedQuasiClosedContainerWithUnhealthyReplica() Assert.assertEquals(0, replicationManager.getInflightDeletion().size()); Assert.assertEquals(0, replicationManager.getMetrics() .getInflightDeletion()); + + report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); + Assert.assertEquals(0, report.getStat( + ReplicationManagerReport.HealthState.OVER_REPLICATED)); } /** @@ -652,6 +704,13 @@ public void testUnderReplicatedQuasiClosedContainer() throws IOException { Assert.assertEquals(1, replicationManager.getMetrics() .getInflightReplication()); + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.UNDER_REPLICATED)); + final long currentReplicateCommandCompleted = replicationManager .getMetrics().getNumReplicationCmdsCompleted(); final long currentReplicateBytesCompleted = replicationManager @@ -675,6 +734,13 @@ public void testUnderReplicatedQuasiClosedContainer() throws IOException { Assert.assertEquals(0, replicationManager.getInflightReplication().size()); Assert.assertEquals(0, replicationManager.getMetrics() .getInflightReplication()); + + report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); + Assert.assertEquals(0, report.getStat( + ReplicationManagerReport.HealthState.UNDER_REPLICATED)); } /** @@ -740,6 +806,15 @@ public void testUnderReplicatedQuasiClosedContainerWithUnhealthyReplica() id, State.QUASI_CLOSED, 1000L, originNodeId, newNode); 
containerStateManager.updateContainerReplica(id.getProtobuf(), newReplica); + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.UNDER_REPLICATED)); + Assert.assertEquals(0, report.getStat( + ReplicationManagerReport.HealthState.UNHEALTHY)); + /* * We have report the replica to SCM, in the next ReplicationManager * iteration it should delete the unhealthy replica. @@ -765,6 +840,15 @@ public void testUnderReplicatedQuasiClosedContainerWithUnhealthyReplica() final long currentDeleteCommandCompleted = replicationManager.getMetrics() .getNumDeletionCmdsCompleted(); + + report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); + Assert.assertEquals(0, report.getStat( + ReplicationManagerReport.HealthState.UNDER_REPLICATED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.UNHEALTHY)); /* * We have now removed unhealthy replica, next iteration of * ReplicationManager should re-replicate the container as it @@ -788,6 +872,15 @@ public void testUnderReplicatedQuasiClosedContainerWithUnhealthyReplica() Assert.assertEquals(1, replicationManager.getInflightReplication().size()); Assert.assertEquals(1, replicationManager.getMetrics() .getInflightReplication()); + + report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.UNDER_REPLICATED)); + Assert.assertEquals(0, report.getStat( + ReplicationManagerReport.HealthState.UNHEALTHY)); } @@ -820,6 +913,10 @@ public void testQuasiClosedToClosed() throws IOException { Assert.assertEquals(currentCloseCommandCount + 3, datanodeCommandHandler .getInvocationCount(SCMCommandProto.Type.closeContainerCommand)); + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.QUASI_CLOSED)); + Assert.assertEquals(0, report.getStat( + ReplicationManagerReport.HealthState.QUASI_CLOSED_STUCK)); } @@ -844,6 +941,13 @@ public void testHealthyClosedContainer() throws IOException { replicationManager.processAll(); eventQueue.processAll(1000); Assert.assertEquals(0, datanodeCommandHandler.getInvocation()); + + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.CLOSED)); + for (ReplicationManagerReport.HealthState s : + ReplicationManagerReport.HealthState.values()) { + Assert.assertEquals(0, report.getStat(s)); + } } /** @@ -871,6 +975,11 @@ public void testUnhealthyOpenContainer() throws IOException { eventQueue.processAll(1000); Mockito.verify(closeContainerHandler, Mockito.times(1)) .onMessage(id, eventQueue); + + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.OPEN)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.OPEN_UNHEALTHY)); } /** @@ -957,6 +1066,11 @@ public void 
additionalReplicaScheduledWhenMisReplicated() throws IOException { Assert.assertEquals(1, replicationManager.getMetrics() .getInflightReplication()); + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(1, report.getStat(LifeCycleState.CLOSED)); + Assert.assertEquals(1, report.getStat( + ReplicationManagerReport.HealthState.MIS_REPLICATED)); + // Now make it so that all containers seem mis-replicated no matter how // many replicas. This will test replicas are not scheduled if the new // replica does not fix the mis-replication. @@ -1036,6 +1150,7 @@ public void overReplicatedButRemovingMakesMisReplicated() throws IOException { Assert.assertEquals(1, replicationManager.getInflightDeletion().size()); Assert.assertEquals(1, replicationManager.getMetrics() .getInflightDeletion()); + assertOverReplicatedCount(1); } @Test @@ -1077,6 +1192,8 @@ public void testOverReplicatedAndPolicySatisfied() throws IOException { Assert.assertEquals(1, replicationManager.getInflightDeletion().size()); Assert.assertEquals(1, replicationManager.getMetrics() .getInflightDeletion()); + + assertOverReplicatedCount(1); } @Test @@ -1135,6 +1252,7 @@ public void testUnderReplicatedDueToDecommission() throws IOException { addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), CLOSED); addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), CLOSED); assertReplicaScheduled(2); + assertUnderReplicatedCount(1); } /** @@ -1148,6 +1266,7 @@ public void testUnderReplicatedDueToAllDecommission() throws IOException { addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), CLOSED); addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), CLOSED); assertReplicaScheduled(3); + assertUnderReplicatedCount(1); } /** @@ -1162,6 +1281,7 @@ public void testCorrectlyReplicatedWithDecommission() throws IOException { addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED); addReplica(container, new NodeStatus(DECOMMISSIONING, HEALTHY), CLOSED); assertReplicaScheduled(0); + assertUnderReplicatedCount(0); } /** @@ -1175,6 +1295,7 @@ public void testUnderReplicatedDueToMaintenance() throws IOException { addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); assertReplicaScheduled(1); + assertUnderReplicatedCount(1); } /** @@ -1195,6 +1316,7 @@ public void testNotUnderReplicatedDueToMaintenanceMinRepOne() addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); assertReplicaScheduled(0); + assertUnderReplicatedCount(0); } /** @@ -1215,6 +1337,7 @@ public void testUnderReplicatedDueToMaintenanceMinRepOne() addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); assertReplicaScheduled(1); + assertUnderReplicatedCount(1); } /** @@ -1228,6 +1351,7 @@ public void testUnderReplicatedDueToAllMaintenance() throws IOException { addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); assertReplicaScheduled(2); + assertUnderReplicatedCount(1); } /** @@ -1242,6 +1366,7 @@ public void testCorrectlyReplicatedWithMaintenance() throws IOException { addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); assertReplicaScheduled(0); + 
assertUnderReplicatedCount(0); } /** @@ -1257,8 +1382,21 @@ public void testUnderReplicatedWithDecommissionAndMaintenance() addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED); assertReplicaScheduled(2); + assertUnderReplicatedCount(1); } + /** + * ReplicationManager should replicate zero replica when all copies + * are missing. + */ + @Test + public void testContainerWithMissingReplicas() + throws IOException { + createContainer(LifeCycleState.CLOSED); + assertReplicaScheduled(0); + assertUnderReplicatedCount(1); + assertMissingCount(1); + } /** * When a CLOSED container is over replicated, ReplicationManager * deletes the excess replicas. While choosing the replica for deletion @@ -1302,6 +1440,7 @@ public void testOverReplicatedClosedContainerWithDecomAndMaint() SCMCommandProto.Type.deleteContainerCommand, r.getDatanodeDetails())); } + assertOverReplicatedCount(1); } /** @@ -1319,6 +1458,7 @@ public void testUnderReplicatedNotHealthySource() throws IOException { // There should be replica scheduled, but as all nodes are stale, nothing // gets scheduled. assertReplicaScheduled(0); + assertUnderReplicatedCount(1); } /** @@ -1723,6 +1863,7 @@ public void testDeleteCommandTimeout() throws private ContainerInfo createContainer(LifeCycleState containerState) throws IOException { final ContainerInfo container = getContainer(containerState); + container.setUsedBytes(1234); containerStateManager.addContainer(container.getProtobuf()); return container; } @@ -1791,6 +1932,24 @@ private void assertDeleteScheduled(int delta) throws InterruptedException { replicationManager.getMetrics().getNumDeletionCmdsSent()); } + private void assertUnderReplicatedCount(int count) { + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(count, report.getStat( + ReplicationManagerReport.HealthState.UNDER_REPLICATED)); + } + + private void assertMissingCount(int count) { + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(count, report.getStat( + ReplicationManagerReport.HealthState.MISSING)); + } + + private void assertOverReplicatedCount(int count) { + ReplicationManagerReport report = replicationManager.getContainerReport(); + Assert.assertEquals(count, report.getStat( + ReplicationManagerReport.HealthState.OVER_REPLICATED)); + } + @After public void teardown() throws Exception { containerStateManager.close(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java index 3bb92632465..9333b172ac0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.hdds.scm.container; -import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java index a4776e9504d..b23482b9157 
100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/TestContainerBalancer.java @@ -105,11 +105,11 @@ public void setup() throws SCMException, NodeNotFoundException { balancerConfiguration = conf.getObject(ContainerBalancerConfiguration.class); - balancerConfiguration.setThreshold(0.1); - balancerConfiguration.setIdleIteration(1); - balancerConfiguration.setMaxDatanodesRatioToInvolvePerIteration(1.0d); + balancerConfiguration.setThreshold(10); + balancerConfiguration.setIterations(1); + balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); balancerConfiguration.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); - balancerConfiguration.setMaxSizeEnteringTarget(5 * OzoneConsts.GB); + balancerConfiguration.setMaxSizeEnteringTarget(50 * OzoneConsts.GB); conf.setFromObject(balancerConfiguration); GenericTestUtils.setLogLevel(ContainerBalancer.LOG, Level.DEBUG); @@ -176,7 +176,7 @@ public void testCalculationOfUtilization() { // check for random threshold values for (int i = 0; i < 50; i++) { - double randomThreshold = RANDOM.nextDouble(); + double randomThreshold = RANDOM.nextDouble() * 100; balancerConfiguration.setThreshold(randomThreshold); containerBalancer.start(balancerConfiguration); @@ -186,7 +186,7 @@ public void testCalculationOfUtilization() { // modify this after balancer is fully completed try { Thread.sleep(100); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } expectedUnBalancedNodes = determineExpectedUnBalancedNodes(randomThreshold); @@ -211,7 +211,7 @@ public void testCalculationOfUtilization() { */ @Test public void unBalancedNodesListShouldBeEmptyWhenClusterIsBalanced() { - balancerConfiguration.setThreshold(0.99); + balancerConfiguration.setThreshold(99.99); containerBalancer.start(balancerConfiguration); // waiting for balance completed. @@ -219,7 +219,7 @@ public void unBalancedNodesListShouldBeEmptyWhenClusterIsBalanced() { // modify this after balancer is fully completed try { Thread.sleep(100); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); Assert.assertEquals(0, containerBalancer.getUnBalancedNodes().size()); @@ -231,10 +231,12 @@ public void unBalancedNodesListShouldBeEmptyWhenClusterIsBalanced() { */ @Test public void containerBalancerShouldObeyMaxDatanodesToInvolveLimit() { - balancerConfiguration.setMaxDatanodesRatioToInvolvePerIteration(0.3d); + int percent = 20; + balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration( + percent); balancerConfiguration.setMaxSizeToMovePerIteration(100 * OzoneConsts.GB); - balancerConfiguration.setThreshold(0.01); - balancerConfiguration.setIdleIteration(1); + balancerConfiguration.setThreshold(1); + balancerConfiguration.setIterations(1); containerBalancer.start(balancerConfiguration); // waiting for balance completed. 
@@ -242,11 +244,11 @@ public void containerBalancerShouldObeyMaxDatanodesToInvolveLimit() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } Assert.assertFalse( containerBalancer.getCountDatanodesInvolvedPerIteration() > - (int) (0.3 * numberOfNodes)); + (percent * numberOfNodes / 100)); containerBalancer.stop(); } @@ -256,7 +258,7 @@ public void containerBalancerShouldSelectOnlyClosedContainers() { for (ContainerInfo containerInfo : cidToInfoMap.values()) { containerInfo.setState(HddsProtos.LifeCycleState.OPEN); } - balancerConfiguration.setThreshold(0.1); + balancerConfiguration.setThreshold(10); containerBalancer.start(balancerConfiguration); // waiting for balance completed. @@ -264,7 +266,7 @@ public void containerBalancerShouldSelectOnlyClosedContainers() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); @@ -284,7 +286,7 @@ public void containerBalancerShouldSelectOnlyClosedContainers() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); // check whether all selected containers are closed @@ -298,9 +300,9 @@ public void containerBalancerShouldSelectOnlyClosedContainers() { @Test public void containerBalancerShouldObeyMaxSizeToMoveLimit() { - balancerConfiguration.setThreshold(0.01); + balancerConfiguration.setThreshold(1); balancerConfiguration.setMaxSizeToMovePerIteration(10 * OzoneConsts.GB); - balancerConfiguration.setIdleIteration(1); + balancerConfiguration.setIterations(1); containerBalancer.start(balancerConfiguration); // waiting for balance completed. @@ -308,7 +310,7 @@ public void containerBalancerShouldObeyMaxSizeToMoveLimit() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } // balancer should not have moved more size than the limit Assert.assertFalse(containerBalancer.getSizeMovedPerIteration() > @@ -318,9 +320,9 @@ public void containerBalancerShouldObeyMaxSizeToMoveLimit() { @Test public void targetDatanodeShouldNotAlreadyContainSelectedContainer() { - balancerConfiguration.setThreshold(0.1); + balancerConfiguration.setThreshold(10); balancerConfiguration.setMaxSizeToMovePerIteration(100 * OzoneConsts.GB); - balancerConfiguration.setMaxDatanodesRatioToInvolvePerIteration(1.0d); + balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); containerBalancer.start(balancerConfiguration); // waiting for balance completed. 
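(Note: the accessor changes above reflect ContainerBalancerConfiguration moving from fractional thresholds/ratios and an "idle iteration" count to whole percentages and setIterations(). A minimal sketch of the new-style setup, reusing only accessors that appear in these tests; the numeric values are illustrative, not recommendations.)

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.container.balancer.ContainerBalancerConfiguration;
    import org.apache.hadoop.ozone.OzoneConsts;

    /** Sketch only: new-style percentage-based balancer settings. */
    final class BalancerConfigSketch {
      private BalancerConfigSketch() { }

      static ContainerBalancerConfiguration newStyle(OzoneConfiguration conf) {
        ContainerBalancerConfiguration cb =
            conf.getObject(ContainerBalancerConfiguration.class);
        cb.setThreshold(10);                   // percent; replaces setThreshold(0.1)
        cb.setIterations(1);                   // replaces setIdleIteration(1)
        cb.setMaxDatanodesPercentageToInvolvePerIteration(100); // replaces ratio 1.0d
        cb.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB);
        cb.setMaxSizeEnteringTarget(50 * OzoneConsts.GB);
        conf.setFromObject(cb);
        return cb;
      }
    }
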
@@ -328,7 +330,7 @@ public void targetDatanodeShouldNotAlreadyContainSelectedContainer() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); Map sourceToTargetMap = @@ -345,9 +347,9 @@ public void targetDatanodeShouldNotAlreadyContainSelectedContainer() { @Test public void containerMoveSelectionShouldFollowPlacementPolicy() { - balancerConfiguration.setThreshold(0.1); + balancerConfiguration.setThreshold(10); balancerConfiguration.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); - balancerConfiguration.setMaxDatanodesRatioToInvolvePerIteration(1.0d); + balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); containerBalancer.start(balancerConfiguration); // waiting for balance completed. @@ -355,7 +357,7 @@ public void containerMoveSelectionShouldFollowPlacementPolicy() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); Map sourceToTargetMap = @@ -387,10 +389,10 @@ public void containerMoveSelectionShouldFollowPlacementPolicy() { @Test public void targetDatanodeShouldBeInServiceHealthy() throws NodeNotFoundException { - balancerConfiguration.setThreshold(0.1); - balancerConfiguration.setMaxDatanodesRatioToInvolvePerIteration(1.0d); + balancerConfiguration.setThreshold(10); + balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); balancerConfiguration.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); - balancerConfiguration.setMaxSizeEnteringTarget(5 * OzoneConsts.GB); + balancerConfiguration.setMaxSizeEnteringTarget(50 * OzoneConsts.GB); containerBalancer.start(balancerConfiguration); // waiting for balance completed. 
@@ -414,10 +416,10 @@ public void targetDatanodeShouldBeInServiceHealthy() @Test public void selectedContainerShouldNotAlreadyHaveBeenSelected() { - balancerConfiguration.setThreshold(0.1); - balancerConfiguration.setMaxDatanodesRatioToInvolvePerIteration(1.0d); + balancerConfiguration.setThreshold(10); + balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); balancerConfiguration.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); - balancerConfiguration.setMaxSizeEnteringTarget(5 * OzoneConsts.GB); + balancerConfiguration.setMaxSizeEnteringTarget(50 * OzoneConsts.GB); containerBalancer.start(balancerConfiguration); @@ -426,7 +428,7 @@ public void selectedContainerShouldNotAlreadyHaveBeenSelected() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); Set containers = new HashSet<>(); @@ -440,10 +442,10 @@ public void selectedContainerShouldNotAlreadyHaveBeenSelected() { @Test public void balancerShouldNotSelectConfiguredExcludeContainers() { - balancerConfiguration.setThreshold(0.1); - balancerConfiguration.setMaxDatanodesRatioToInvolvePerIteration(1.0d); + balancerConfiguration.setThreshold(10); + balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); balancerConfiguration.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); - balancerConfiguration.setMaxSizeEnteringTarget(5 * OzoneConsts.GB); + balancerConfiguration.setMaxSizeEnteringTarget(50 * OzoneConsts.GB); balancerConfiguration.setExcludeContainers("1, 4, 5"); containerBalancer.start(balancerConfiguration); @@ -453,7 +455,7 @@ public void balancerShouldNotSelectConfiguredExcludeContainers() { // modify this after balancer is fully completed try { Thread.sleep(1000); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); Set excludeContainers = @@ -467,26 +469,15 @@ public void balancerShouldNotSelectConfiguredExcludeContainers() { @Test public void balancerShouldObeyMaxSizeEnteringTargetLimit() { - balancerConfiguration.setThreshold(0.1); - balancerConfiguration.setMaxDatanodesRatioToInvolvePerIteration(1.0d); + balancerConfiguration.setThreshold(10); + balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); balancerConfiguration.setMaxSizeToMovePerIteration(50 * OzoneConsts.GB); // no containers should be selected when the limit is zero balancerConfiguration.setMaxSizeEnteringTarget(0); - containerBalancer.start(balancerConfiguration); - - // waiting for balance completed. 
- // TODO: this is a temporary implementation for now - // modify this after balancer is fully completed - try { - Thread.sleep(500); - } catch (InterruptedException e) {} + boolean startResult = containerBalancer.start(balancerConfiguration); - containerBalancer.stop(); - // balancer should have identified unbalanced nodes - Assert.assertFalse(containerBalancer.getUnBalancedNodes().isEmpty()); - // no container should have been selected - Assert.assertTrue(containerBalancer.getSourceToTargetMap().isEmpty()); + Assert.assertFalse(startResult); // some containers should be selected when using default values OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); @@ -499,7 +490,7 @@ public void balancerShouldObeyMaxSizeEnteringTargetLimit() { // modify this after balancer is fully completed try { Thread.sleep(500); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); // balancer should have identified unbalanced nodes @@ -509,11 +500,11 @@ public void balancerShouldObeyMaxSizeEnteringTargetLimit() { @Test public void testMetrics() { - balancerConfiguration.setThreshold(0.1); - balancerConfiguration.setIdleIteration(1); + balancerConfiguration.setThreshold(10); + balancerConfiguration.setIterations(1); balancerConfiguration.setMaxSizeEnteringTarget(10 * OzoneConsts.GB); balancerConfiguration.setMaxSizeToMovePerIteration(100 * OzoneConsts.GB); - balancerConfiguration.setMaxDatanodesRatioToInvolvePerIteration(1.0); + balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); containerBalancer.start(balancerConfiguration); @@ -522,7 +513,7 @@ public void testMetrics() { // modify this after balancer is fully completed try { Thread.sleep(500); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } containerBalancer.stop(); ContainerBalancerMetrics metrics = containerBalancer.getMetrics(); @@ -544,11 +535,11 @@ public void testMetrics() { */ @Test public void balancerShouldFollowExcludeAndIncludeDatanodesConfigurations() { - balancerConfiguration.setThreshold(0.1); - balancerConfiguration.setIdleIteration(1); + balancerConfiguration.setThreshold(10); + balancerConfiguration.setIterations(1); balancerConfiguration.setMaxSizeEnteringTarget(10 * OzoneConsts.GB); balancerConfiguration.setMaxSizeToMovePerIteration(100 * OzoneConsts.GB); - balancerConfiguration.setMaxDatanodesRatioToInvolvePerIteration(1.0); + balancerConfiguration.setMaxDatanodesPercentageToInvolvePerIteration(100); // only these nodes should be included // the ones also specified in excludeNodes should be excluded @@ -599,28 +590,30 @@ public void testContainerBalancerConfiguration() { OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); ozoneConfiguration.set("ozone.scm.container.size", "5GB"); ozoneConfiguration.setDouble( - "hdds.container.balancer.utilization.threshold", 0.01); + "hdds.container.balancer.utilization.threshold", 1); ContainerBalancerConfiguration cbConf = ozoneConfiguration.getObject(ContainerBalancerConfiguration.class); - Assert.assertEquals(cbConf.getThreshold(), 0.01d, 0.001); + Assert.assertEquals(1, cbConf.getThreshold(), 0.001); - Assert.assertEquals(cbConf.getMaxSizeLeavingSource(), - 26 * 1024 * 1024 * 1024L); + Assert.assertEquals(26 * 1024 * 1024 * 1024L, + cbConf.getMaxSizeLeavingSource()); - Assert.assertEquals(cbConf.getMoveTimeout().toMillis(), 30 * 60 * 1000); + Assert.assertEquals(30 * 60 * 1000, + cbConf.getMoveTimeout().toMillis()); } /** * Determines unBalanced nodes, that is, over and 
under utilized nodes, * according to the generated utilization values for nodes and the threshold. * - * @param threshold A fraction from range 0 to 1. + * @param threshold A percentage in the range 0 to 100 * @return List of DatanodeUsageInfo containing the expected(correct) * unBalanced nodes. */ private List determineExpectedUnBalancedNodes( double threshold) { + threshold /= 100; double lowerLimit = averageUtilization - threshold; double upperLimit = averageUtilization + threshold; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java index 1f0853d25c7..4ed2887ccb6 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; @@ -97,11 +97,11 @@ public void testRackAwarePolicy() throws IOException { datanodeDetails, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); datanodeInfo.updateStorageReports( @@ -114,19 +114,19 @@ public void testRackAwarePolicy() throws IOException { dnInfos.add(datanodeInfo); } - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( dnInfos.get(2).getUuid(), "/data1-" + dnInfos.get(2).getUuidString(), STORAGE_CAPACITY, 90L, 10L, null); dnInfos.get(2).updateStorageReports( new ArrayList<>(Arrays.asList(storage2))); - StorageReportProto storage3 = TestUtils.createStorageReport( + StorageReportProto storage3 = HddsTestUtils.createStorageReport( dnInfos.get(3).getUuid(), "/data1-" + dnInfos.get(3).getUuidString(), STORAGE_CAPACITY, 80L, 20L, null); dnInfos.get(3).updateStorageReports( new ArrayList<>(Arrays.asList(storage3))); - StorageReportProto storage4 = TestUtils.createStorageReport( + StorageReportProto storage4 = HddsTestUtils.createStorageReport( dnInfos.get(4).getUuid(), "/data1-" + dnInfos.get(4).getUuidString(), STORAGE_CAPACITY, 70L, 30L, null); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index bc88e9763fb..ffe658e9c9e 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.DatanodeInfo; @@ -63,11 +63,11 @@ public void chooseDatanodes() throws SCMException { NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); datanodeInfo.updateStorageReports( @@ -78,19 +78,19 @@ public void chooseDatanodes() throws SCMException { datanodes.add(datanodeInfo); } - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( datanodes.get(2).getUuid(), "/data1-" + datanodes.get(2).getUuidString(), 100L, 90L, 10L, null); datanodes.get(2).updateStorageReports( new ArrayList<>(Arrays.asList(storage2))); - StorageReportProto storage3 = TestUtils.createStorageReport( + StorageReportProto storage3 = HddsTestUtils.createStorageReport( datanodes.get(3).getUuid(), "/data1-" + datanodes.get(3).getUuidString(), 100L, 80L, 20L, null); datanodes.get(3).updateStorageReports( new ArrayList<>(Arrays.asList(storage3))); - StorageReportProto storage4 = TestUtils.createStorageReport( + StorageReportProto storage4 = HddsTestUtils.createStorageReport( datanodes.get(4).getUuid(), "/data1-" + datanodes.get(4).getUuidString(), 100L, 70L, 30L, null); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index eba7703b46f..a830a71b4ac 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.NetConstants; import 
org.apache.hadoop.hdds.scm.net.NetworkTopology; @@ -119,11 +119,11 @@ public void setup() { datanodeDetails, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); datanodeInfo.updateStorageReports( @@ -137,39 +137,39 @@ public void setup() { } if (datanodeCount > 4) { - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( dnInfos.get(2).getUuid(), "/data1-" + datanodes.get(2).getUuidString(), STORAGE_CAPACITY, 90L, 10L, null); dnInfos.get(2).updateStorageReports( new ArrayList<>(Arrays.asList(storage2))); - StorageReportProto storage3 = TestUtils.createStorageReport( + StorageReportProto storage3 = HddsTestUtils.createStorageReport( dnInfos.get(3).getUuid(), "/data1-" + dnInfos.get(3).getUuidString(), STORAGE_CAPACITY, 80L, 20L, null); dnInfos.get(3).updateStorageReports( new ArrayList<>(Arrays.asList(storage3))); - StorageReportProto storage4 = TestUtils.createStorageReport( + StorageReportProto storage4 = HddsTestUtils.createStorageReport( dnInfos.get(4).getUuid(), "/data1-" + dnInfos.get(4).getUuidString(), STORAGE_CAPACITY, 70L, 30L, null); dnInfos.get(4).updateStorageReports( new ArrayList<>(Arrays.asList(storage4))); } else if (datanodeCount > 3) { - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( dnInfos.get(2).getUuid(), "/data1-" + dnInfos.get(2).getUuidString(), STORAGE_CAPACITY, 90L, 10L, null); dnInfos.get(2).updateStorageReports( new ArrayList<>(Arrays.asList(storage2))); - StorageReportProto storage3 = TestUtils.createStorageReport( + StorageReportProto storage3 = HddsTestUtils.createStorageReport( dnInfos.get(3).getUuid(), "/data1-" + dnInfos.get(3).getUuidString(), STORAGE_CAPACITY, 80L, 20L, null); dnInfos.get(3).updateStorageReports( new ArrayList<>(Arrays.asList(storage3))); } else if (datanodeCount > 2) { - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( dnInfos.get(2).getUuid(), "/data1-" + dnInfos.get(2).getUuidString(), STORAGE_CAPACITY, 84L, 16L, null); @@ -438,11 +438,11 @@ public void testDatanodeWithDefaultNetworkLocation() throws SCMException { dn, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( dnInfo.getUuid(), "/data1-" + dnInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + dnInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null); dnInfo.updateStorageReports( @@ -550,7 +550,7 @@ public void testOutOfServiceNodesNotSelected() { dn.setNodeStatus(new NodeStatus(DECOMMISSIONED, HEALTHY)); } - for (int i=0; i<10; i++) { + for (int i = 0; i < 10; i++) { // Set a random DN to in_service and ensure it is always picked int index = new Random().nextInt(dnInfos.size()); 
dnInfos.get(index).setNodeStatus(NodeStatus.inServiceHealthy()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java index c941111704a..9ad03d1647d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.DatanodeInfo; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -63,11 +63,11 @@ public void chooseDatanodes() throws SCMException { NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); datanodeInfo.updateStorageReports( @@ -78,7 +78,7 @@ public void chooseDatanodes() throws SCMException { datanodes.add(datanodeInfo); } - StorageReportProto storage2 = TestUtils.createStorageReport( + StorageReportProto storage2 = HddsTestUtils.createStorageReport( datanodes.get(2).getUuid(), "/data1-" + datanodes.get(2).getUuidString(), 100L, 90L, 10L, null); @@ -172,11 +172,11 @@ public void testIsValidNode() throws SCMException { NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto()); - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); MetadataStorageReportProto metaStorage1 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodeInfo.getUuidString(), 100L, 0, 100L, null); datanodeInfo.updateStorageReports( @@ -187,7 +187,7 @@ public void testIsValidNode() throws SCMException { datanodes.add(datanodeInfo); } - StorageReportProto storage1 = TestUtils.createStorageReport( + StorageReportProto storage1 = HddsTestUtils.createStorageReport( datanodes.get(1).getUuid(), "/data1-" + datanodes.get(1).getUuidString(), 100L, 90L, 10L, null); @@ -195,7 +195,7 @@ public void testIsValidNode() throws SCMException { new ArrayList<>(Arrays.asList(storage1))); MetadataStorageReportProto metaStorage2 = - TestUtils.createMetadataStorageReport( + HddsTestUtils.createMetadataStorageReport( "/metadata1-" + datanodes.get(2).getUuidString(), 100L, 90, 10L, null); datanodes.get(2).updateMetaDataStorageReports( diff --git 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerMetrics.java new file mode 100644 index 00000000000..60ebbb0b866 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManagerMetrics.java @@ -0,0 +1,97 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.container.replication; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ReplicationManager; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import static org.apache.hadoop.test.MetricsAsserts.getLongGauge; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; + +/** + * Tests for the ReplicationManagerMetrics class. + */ +public class TestReplicationManagerMetrics { + + private ReplicationManager replicationManager; + private ReplicationManagerMetrics metrics; + + @Before + public void setup() { + ReplicationManagerReport report = new ReplicationManagerReport(); + + // Each lifecycle state has a value from 1 to N. Set the value of the metric + // to the value by incrementing the counter that number of times. + for (HddsProtos.LifeCycleState s : HddsProtos.LifeCycleState.values()) { + for (int i = 0; i < s.getNumber(); i++) { + report.increment(s); + } + } + // The ordinal starts from 0, so each state will have a value of its ordinal + for (ReplicationManagerReport.HealthState s : + ReplicationManagerReport.HealthState.values()) { + for (int i = 0; i < s.ordinal(); i++) { + report.increment(s); + } + } + replicationManager = Mockito.mock(ReplicationManager.class); + Mockito.when(replicationManager.getContainerReport()).thenReturn(report); + metrics = ReplicationManagerMetrics.create(replicationManager); + } + + @After + public void after() { + metrics.unRegister(); + } + + @Test + public void testLifeCycleStateMetricsPresent() { + Assert.assertEquals(HddsProtos.LifeCycleState.OPEN.getNumber(), + getGauge("NumOpenContainers")); + Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING.getNumber(), + getGauge("NumClosingContainers")); + Assert.assertEquals(HddsProtos.LifeCycleState.QUASI_CLOSED.getNumber(), + getGauge("NumQuasiClosedContainers")); + Assert.assertEquals(HddsProtos.LifeCycleState.CLOSED.getNumber(), + getGauge("NumClosedContainers")); + Assert.assertEquals(HddsProtos.LifeCycleState.DELETING.getNumber(), + getGauge("NumDeletingContainers")); + Assert.assertEquals(HddsProtos.LifeCycleState.DELETED.getNumber(), + getGauge("NumDeletedContainers")); + } + + @Test + public void testHealthStateMetricsPresent() { + for (ReplicationManagerReport.HealthState s : + ReplicationManagerReport.HealthState.values()) { + Assert.assertEquals(s.ordinal(), getGauge(s.getMetricName())); + } + } + + private long getGauge(String metricName) { + return getLongGauge(metricName, + getMetrics(ReplicationManagerMetrics.METRICS_SOURCE_NAME)); + } + +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java index 69b038cf09d..405da0f6aa0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java @@ -431,6 +431,16 @@ public void testIsHealthyWithMaintReplicaIsHealthy() { assertTrue(rcnt.isHealthy()); } + @Test + public void testContainerWithNoReplicasIsMissing() { + Set replica = new HashSet<>(); + ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED); + ContainerReplicaCount rcnt = + new ContainerReplicaCount(container, replica, 0, 0, 3, 2); + assertTrue(rcnt.isMissing()); + assertFalse(rcnt.isSufficientlyReplicated()); + } + private void validate(ContainerReplicaCount rcnt, boolean sufficientlyReplicated, int replicaDelta, boolean overReplicated) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/crl/TestCRLStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/crl/TestCRLStatusReportHandler.java index 1fda9c1d58d..ab32e3688e8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/crl/TestCRLStatusReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/crl/TestCRLStatusReportHandler.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CRLStatusReport; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; import org.apache.hadoop.hdds.scm.server.SCMCertStore; @@ -125,7 +125,7 @@ private CRLStatusReportFromDatanode getCRLStatusReport( List pendingCRLIds, long receivedCRLId) { CRLStatusReport crlStatusReportProto = - TestUtils.createCRLStatusReport(pendingCRLIds, receivedCRLId); + HddsTestUtils.createCRLStatusReport(pendingCRLIds, receivedCRLId); return new CRLStatusReportFromDatanode(dn, crlStatusReportProto); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java index 9ec10fc9138..5f82cce5329 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java @@ -64,7 +64,7 @@ public void testSCMHAConfig() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); String[] nodes = new String[] {"scm1", "scm2", "scm3"}; - conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY+"."+scmServiceId, + conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." 
+ scmServiceId, "scm1,scm2,scm3"); conf.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, "scm1"); @@ -72,14 +72,14 @@ public void testSCMHAConfig() throws Exception { int i = 1; for (String nodeId : nodes) { conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, - scmServiceId, nodeId), "localhost:"+port++); + scmServiceId, nodeId), "localhost:" + port++); conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_PORT_KEY, scmServiceId, nodeId), port); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY, scmServiceId, nodeId), "172.28.9.1"); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY, - scmServiceId, nodeId), "localhost:"+port++); + scmServiceId, nodeId), "localhost:" + port++); conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_SECURITY_SERVICE_PORT_KEY, scmServiceId, nodeId), port); conf.set(ConfUtils.addKeySuffixes( @@ -87,26 +87,26 @@ public void testSCMHAConfig() throws Exception { "172.28.9.1"); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_ADDRESS_KEY, - scmServiceId, nodeId), "localhost:"+port++); + scmServiceId, nodeId), "localhost:" + port++); conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_PORT_KEY, scmServiceId, nodeId), port); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_BIND_HOST_KEY, scmServiceId, nodeId), "172.28.9.1"); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_ADDRESS_KEY, - scmServiceId, nodeId), "localhost:"+port++); + scmServiceId, nodeId), "localhost:" + port++); conf.setInt(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_PORT_KEY, scmServiceId, nodeId), port); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_BIND_HOST_KEY, scmServiceId, nodeId), "172.28.9.1"); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_ADDRESS_KEY, - scmServiceId, nodeId), "localhost:"+port++); + scmServiceId, nodeId), "localhost:" + port++); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_BIND_HOST_KEY, scmServiceId, nodeId), "172.28.9.1"); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_DB_DIRS, - scmServiceId, nodeId), "/var/scm-metadata"+ i++); + scmServiceId, nodeId), "/var/scm-metadata" + i++); conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_ADDRESS_KEY, scmServiceId, nodeId), "localhost"); @@ -121,7 +121,7 @@ public void testSCMHAConfig() throws Exception { port = 9880; // Validate configs. 
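(Note on the key scheme these HA-config assertions exercise: each per-node SCM setting is stored under the base key suffixed with the service id and node id via ConfUtils.addKeySuffixes. A minimal sketch, reusing the constants and helpers visible in the test; the service id, host, and port values are illustrative.)

    // Sketch: per-node SCM HA keys (illustrative values, helpers as in the test).
    OzoneConfiguration conf = new OzoneConfiguration();
    String scmServiceId = "scmservice";   // example service id
    String nodeId = "scm1";
    conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId);
    conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." + scmServiceId, "scm1,scm2,scm3");
    // addKeySuffixes appends ".<serviceId>.<nodeId>" to the base key, so this
    // effectively sets e.g. "ozone.scm.block.client.address.scmservice.scm1".
    conf.set(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
        scmServiceId, nodeId), "localhost:9880");
    String addr = conf.get(ConfUtils.addKeySuffixes(
        OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, scmServiceId, nodeId));
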
- Assert.assertEquals("localhost:"+port++, + Assert.assertEquals("localhost:" + port++, conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, scmServiceId, "scm1"))); Assert.assertEquals(port, @@ -132,7 +132,7 @@ public void testSCMHAConfig() throws Exception { scmServiceId, "scm1"))); - Assert.assertEquals("localhost:"+port++, + Assert.assertEquals("localhost:" + port++, conf.get(ConfUtils.addKeySuffixes( OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY, scmServiceId, "scm1"))); Assert.assertEquals(port, conf.getInt(ConfUtils.addKeySuffixes( @@ -142,7 +142,7 @@ public void testSCMHAConfig() throws Exception { OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY, scmServiceId, "scm1"))); - Assert.assertEquals("localhost:"+port++, + Assert.assertEquals("localhost:" + port++, conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_ADDRESS_KEY, scmServiceId, "scm1"))); Assert.assertEquals(port, @@ -152,7 +152,7 @@ public void testSCMHAConfig() throws Exception { ConfUtils.addKeySuffixes(OZONE_SCM_CLIENT_BIND_HOST_KEY, scmServiceId, "scm1"))); - Assert.assertEquals("localhost:"+port++, + Assert.assertEquals("localhost:" + port++, conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_DATANODE_ADDRESS_KEY, scmServiceId, "scm1"))); Assert.assertEquals(port, @@ -163,7 +163,7 @@ public void testSCMHAConfig() throws Exception { "scm1"))); - Assert.assertEquals("localhost:"+port++, + Assert.assertEquals("localhost:" + port++, conf.get(ConfUtils.addKeySuffixes(OZONE_SCM_HTTP_ADDRESS_KEY, scmServiceId, "scm1"))); Assert.assertEquals("172.28.9.1", @@ -192,7 +192,7 @@ public void testHAWithSamePortConfig() throws Exception { conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); String[] nodes = new String[] {"scm1", "scm2", "scm3"}; - conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY+"."+scmServiceId, + conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." 
+ scmServiceId, "scm1,scm2,scm3"); conf.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, "scm1"); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java index f5913aa28e8..c352ca463f2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java @@ -49,7 +49,7 @@ public void testEncodeAndDecodeSuccess() throws Exception { } @Test(expected = InvalidProtocolBufferException.class) - public void testEncodeWithNonProto() throws Exception{ + public void testEncodeWithNonProto() throws Exception { PipelineID pipelineID = PipelineID.randomId(); // Non proto args Object[] args = new Object[] {pipelineID}; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java index 5543be5832b..834b539ae65 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestPipelineIDCodec.java @@ -57,7 +57,7 @@ public void testPersistingFFAsUUID() throws Exception { @Test public void testPersistingARandomUUID() throws Exception { - for (int i=0; i<100; i++) { + for (int i = 0; i < 100; i++) { UUID uuid = UUID.randomUUID(); long mask = 0x0000_0000_0000_00FFL; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index 773713f3dbe..63ecb29f038 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -34,7 +34,7 @@ .StorageContainerDatanodeProtocolProtos.LayoutVersionProto; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -189,8 +189,8 @@ public void testContainerPlacementCapacity() throws IOException, SCMNodeManager scmNodeManager = createNodeManager(conf); containerManager = createContainerManager(); - List datanodes = - TestUtils.getListOfRegisteredDatanodeDetails(scmNodeManager, nodeCount); + List datanodes = HddsTestUtils + .getListOfRegisteredDatanodeDetails(scmNodeManager, nodeCount); XceiverClientManager xceiverClientManager = null; LayoutVersionManager versionManager = scmNodeManager.getLayoutVersionManager(); @@ -212,7 +212,7 @@ public void testContainerPlacementCapacity() throws IOException, assertEquals(remaining * nodeCount, (long) scmNodeManager.getStats().getRemaining().get()); - xceiverClientManager= new XceiverClientManager(conf); + xceiverClientManager = new XceiverClientManager(conf); ContainerInfo container = containerManager .allocateContainer( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index 80b72409295..8c69116fdf2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -105,7 +105,7 @@ public void testNodeCanBeQueuedAndCancelled() { */ @Test public void testClosePipelinesEventFiredWhenAdminStarted() - throws NodeNotFoundException{ + throws NodeNotFoundException { DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails(); nodeManager.register(dn1, new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING, @@ -425,7 +425,7 @@ public void testCancelledNodesMovedToInService() */ private Set generateContainers(int count) { Set containers = new HashSet<>(); - for (int i=0; i pipelineManager.getPipelines(new RatisReplicationConfig(THREE)) .size() > 3); - TestUtils.openAllRatisPipelines(pipelineManager); + HddsTestUtils.openAllRatisPipelines(pipelineManager); ContainerInfo container1 = - TestUtils.allocateContainer(containerManager); + HddsTestUtils.allocateContainer(containerManager); ContainerInfo container2 = - TestUtils.allocateContainer(containerManager); + HddsTestUtils.allocateContainer(containerManager); ContainerInfo container3 = - TestUtils.allocateContainer(containerManager); + HddsTestUtils.allocateContainer(containerManager); ContainerInfo container4 = - TestUtils.allocateContainer(containerManager); + HddsTestUtils.allocateContainer(containerManager); registerContainers(datanode1, container1, container2, container4); registerContainers(datanode2, container1, container2); @@ -212,9 +212,10 @@ public void testOnMessage() throws Exception { registerReplicas(containerManager, container3, datanode3); registerReplicas(containerManager, container4, datanode1); - TestUtils.closeContainer(containerManager, container1.containerID()); - TestUtils.closeContainer(containerManager, container2.containerID()); - TestUtils.quasiCloseContainer(containerManager, container3.containerID()); + HddsTestUtils.closeContainer(containerManager, container1.containerID()); + HddsTestUtils.closeContainer(containerManager, container2.containerID()); + HddsTestUtils.quasiCloseContainer(containerManager, + container3.containerID()); // First set the node to IN_MAINTENANCE and ensure the container replicas // are not removed on the dead event diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java index 1b84be7b307..792d62f4c1b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -94,12 +94,12 @@ public void testHostStringsParseCorrectly() @Test public void testAnyInvalidHostThrowsException() - throws InvalidHostStringException{ + throws 
InvalidHostStringException { List dns = generateDatanodes(); // Try to decommission a host that does exist, but give incorrect port try { - decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()+":10")); + decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress() + ":10")); fail("InvalidHostStringException expected"); } catch (InvalidHostStringException e) { } @@ -131,7 +131,7 @@ public void testAnyInvalidHostThrowsException() // that does not exist try { decom.decommissionNodes(Arrays.asList( - dns.get(0).getIpAddress()+":10")); + dns.get(0).getIpAddress() + ":10")); fail("InvalidHostStringException expected"); } catch (InvalidHostStringException e) { } @@ -159,7 +159,7 @@ public void testNodesCanBeDecommissionedAndRecommissioned() // and we hardcoded ports to 3456, 4567, 5678 DatanodeDetails multiDn = dns.get(10); String multiAddr = - multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue(); + multiDn.getIpAddress() + ":" + multiDn.getPorts().get(0).getValue(); decom.decommissionNodes(Arrays.asList(multiAddr)); assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING, nodeManager.getNodeStatus(multiDn).getOperationalState()); @@ -202,7 +202,7 @@ public void testNodesCanBePutIntoMaintenanceAndRecommissioned() // and we hardcoded ports to 3456, 4567, 5678 DatanodeDetails multiDn = dns.get(10); String multiAddr = - multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue(); + multiDn.getIpAddress() + ":" + multiDn.getPorts().get(0).getValue(); decom.startMaintenanceNodes(Arrays.asList(multiAddr), 100); assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, nodeManager.getNodeStatus(multiDn).getOperationalState()); @@ -282,7 +282,7 @@ public void testNodeDecommissionManagerOnBecomeLeader() throws Exception { private SCMNodeManager createNodeManager(OzoneConfiguration config) throws IOException, AuthenticationException { - scm = TestUtils.getScm(config); + scm = HddsTestUtils.getScm(config); return (SCMNodeManager) scm.getScmNodeManager(); } @@ -296,7 +296,7 @@ private SCMNodeManager createNodeManager(OzoneConfiguration config) */ private List generateDatanodes() { List dns = new ArrayList<>(); - for (int i=0; i<10; i++) { + for (int i = 0; i < 10; i++) { DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); dns.add(dn); nodeManager.register(dn, null, null); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java index 436a1e8aa7f..41b759c58cb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java @@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.net.NetworkTopology; @@ -84,9 +84,9 @@ public void resetEventCollector() throws IOException { @Test public void testNodeReport() throws IOException { 
DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails(); - StorageReportProto storageOne = TestUtils + StorageReportProto storageOne = HddsTestUtils .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); - MetadataStorageReportProto metaStorageOne = TestUtils + MetadataStorageReportProto metaStorageOne = HddsTestUtils .createMetadataStorageReport(metaStoragePath, 100, 10, 90, null); SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn); @@ -100,7 +100,7 @@ public void testNodeReport() throws IOException { Assert.assertTrue(nodeMetric.get().getRemaining().get() == 90); Assert.assertTrue(nodeMetric.get().getScmUsed().get() == 10); - StorageReportProto storageTwo = TestUtils + StorageReportProto storageTwo = HddsTestUtils .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null); nodeReportHandler.onMessage( getNodeReport(dn, Arrays.asList(storageOne, storageTwo), @@ -117,7 +117,7 @@ private NodeReportFromDatanode getNodeReport(DatanodeDetails dn, List reports, List metaReports) { NodeReportProto nodeReportProto = - TestUtils.createNodeReport(reports, metaReports); + HddsTestUtils.createNodeReport(reports, metaReports); return new NodeReportFromDatanode(dn, nodeReportProto); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java index da87d1861b0..31ecbf6fff8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeStateManager.java @@ -304,7 +304,7 @@ public Event getLastEvent() { if (events.size() == 0) { return null; } else { - return events.get(events.size()-1); + return events.get(events.size() - 1); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 259b5d70309..ea8a184acff 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -52,7 +52,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -97,16 +97,9 @@ import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.finalizeNewLayoutVersionCommand; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.errorNodeNotPermitted; import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.success; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; -import static 
org.apache.hadoop.hdds.scm.TestUtils.getRandomPipelineReports; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getRandomPipelineReports; import static org.apache.hadoop.hdds.scm.events.SCMEvents.*; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND; import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto; import static org.junit.Assert.assertEquals; @@ -182,6 +175,7 @@ OzoneConfiguration getConf() { TimeUnit.MILLISECONDS); conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, false); return conf; } @@ -195,7 +189,7 @@ OzoneConfiguration getConf() { SCMNodeManager createNodeManager(OzoneConfiguration config) throws IOException, AuthenticationException { - scm = TestUtils.getScm(config); + scm = HddsTestUtils.getScm(config); scmContext = new SCMContext.Builder().setIsInSafeMode(true) .setLeader(true).setIsPreCheckComplete(true) .setSCM(scm).build(); @@ -226,7 +220,7 @@ public void testScmHeartbeat() int registeredNodes = 5; // Send some heartbeats from different nodes. for (int x = 0; x < registeredNodes; x++) { - DatanodeDetails datanodeDetails = TestUtils + DatanodeDetails datanodeDetails = HddsTestUtils .createRandomDatanodeAndRegister(nodeManager); nodeManager.processHeartbeat(datanodeDetails, layoutInfo); } @@ -296,15 +290,15 @@ private DatanodeDetails registerWithCapacity(SCMNodeManager nodeManager, DatanodeDetails details = MockDatanodeDetails.randomDatanodeDetails(); StorageReportProto storageReport = - TestUtils.createStorageReport(details.getUuid(), + HddsTestUtils.createStorageReport(details.getUuid(), details.getNetworkFullPath(), Long.MAX_VALUE); MetadataStorageReportProto metadataStorageReport = - TestUtils.createMetadataStorageReport(details.getNetworkFullPath(), + HddsTestUtils.createMetadataStorageReport(details.getNetworkFullPath(), Long.MAX_VALUE); RegisteredCommand cmd = nodeManager.register( MockDatanodeDetails.randomDatanodeDetails(), - TestUtils.createNodeReport(Arrays.asList(storageReport), + HddsTestUtils.createNodeReport(Arrays.asList(storageReport), Arrays.asList(metadataStorageReport)), getRandomPipelineReports(), layout); @@ -463,7 +457,7 @@ private void assertPipelines(HddsProtos.ReplicationFactor factor, // these pipelines use nodes outside of allowedDNs. if (success) { for (Pipeline pipeline: pipelines) { - for(DatanodeDetails pipelineDN: pipeline.getNodes()) { + for (DatanodeDetails pipelineDN: pipeline.getNodes()) { // Do not wait for this condition to be true. Disallowed DNs should // never be used once we have the expected number of pipelines. 
if (!allowedDnIds.contains(pipelineDN.getUuidString())) { @@ -516,7 +510,7 @@ public void testScmShutdown() conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS); SCMNodeManager nodeManager = createNodeManager(conf); - DatanodeDetails datanodeDetails = TestUtils + DatanodeDetails datanodeDetails = HddsTestUtils .createRandomDatanodeAndRegister(nodeManager); LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager(); LayoutVersionProto layoutInfo = toLayoutVersionProto( @@ -555,7 +549,7 @@ public void testScmHealthyNodeCount() versionManager.getSoftwareLayoutVersion()); for (int x = 0; x < count; x++) { - DatanodeDetails datanodeDetails = TestUtils + DatanodeDetails datanodeDetails = HddsTestUtils .createRandomDatanodeAndRegister(nodeManager); nodeManager.processHeartbeat(datanodeDetails, layoutInfo); } @@ -615,7 +609,7 @@ public void testSetNodeOpStateAndCommandFired() conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false); try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails dn = TestUtils.createRandomDatanodeAndRegister( + DatanodeDetails dn = HddsTestUtils.createRandomDatanodeAndRegister( nodeManager); LayoutVersionManager versionManager = @@ -683,7 +677,7 @@ public void testScmDetectStaleAndDeadNode() List nodeList = createNodeSet(nodeManager, nodeCount); - DatanodeDetails staleNode = TestUtils.createRandomDatanodeAndRegister( + DatanodeDetails staleNode = HddsTestUtils.createRandomDatanodeAndRegister( nodeManager); // Heartbeat once @@ -787,9 +781,9 @@ public void testScmHandleJvmPause() versionManager.getMetadataLayoutVersion(), versionManager.getSoftwareLayoutVersion()); DatanodeDetails node1 = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); DatanodeDetails node2 = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); nodeManager.processHeartbeat(node1, layoutInfo); nodeManager.processHeartbeat(node2, layoutInfo); @@ -863,7 +857,7 @@ public void testProcessLayoutVersionReportHigherMlv() throws IOException, try (SCMNodeManager nodeManager = createNodeManager(conf)) { DatanodeDetails node1 = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer .captureLogs(SCMNodeManager.LOG); int scmMlv = @@ -890,7 +884,7 @@ public void testProcessLayoutVersionLowerMlv() throws IOException { scmStorageConfig, eventPublisher, new NetworkTopologyImpl(conf), SCMContext.emptyContext(), lvm); DatanodeDetails node1 = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); verify(eventPublisher, times(1)).fireEvent(NEW_NODE, node1); int scmMlv = @@ -996,11 +990,11 @@ public void testScmClusterIsInExpectedState1() versionManager.getMetadataLayoutVersion(), versionManager.getSoftwareLayoutVersion()); DatanodeDetails healthyNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); DatanodeDetails staleNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); DatanodeDetails deadNode = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); 
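(Much of this region is the mechanical TestUtils -> HddsTestUtils rename. For reference, a minimal sketch of how the renamed helpers combine to register a datanode with a synthetic node report, using only the signatures that appear in these tests; the 100/10/90 sizes and the nodeManager variable are illustrative.)

    // Sketch: registering a datanode with a synthetic node report (illustrative values).
    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
    StorageReportProto storage = HddsTestUtils.createStorageReport(
        dn.getUuid(), "/data1-" + dn.getUuidString(), 100L, 10L, 90L, null);
    MetadataStorageReportProto metaStorage =
        HddsTestUtils.createMetadataStorageReport(
            "/metadata1-" + dn.getUuidString(), 100L, 10L, 90L, null);
    nodeManager.register(dn,
        HddsTestUtils.createNodeReport(
            Arrays.asList(storage), Arrays.asList(metaStorage)),
        HddsTestUtils.getRandomPipelineReports());
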
nodeManager.processHeartbeat(healthyNode, layoutInfo); nodeManager.processHeartbeat(staleNode, layoutInfo); nodeManager.processHeartbeat(deadNode, layoutInfo); @@ -1136,7 +1130,7 @@ private List createNodeSet(SCMNodeManager nodeManager, int count) { List list = new ArrayList<>(); for (int x = 0; x < count; x++) { - DatanodeDetails datanodeDetails = TestUtils + DatanodeDetails datanodeDetails = HddsTestUtils .createRandomDatanodeAndRegister(nodeManager); list.add(datanodeDetails); } @@ -1352,9 +1346,9 @@ public void testScmStatsFromNodeReport() UUID dnId = dn.getUuid(); long free = capacity - used; String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = TestUtils + StorageReportProto report = HddsTestUtils .createStorageReport(dnId, storagePath, capacity, used, free, null); - nodeManager.register(dn, TestUtils.createNodeReport( + nodeManager.register(dn, HddsTestUtils.createNodeReport( Arrays.asList(report), Collections.emptyList()), null); nodeManager.processHeartbeat(dn, layoutInfo); } @@ -1402,12 +1396,12 @@ public void tesVolumeInfoFromNodeReport() boolean failed = true; for (int x = 0; x < volumeCount; x++) { String storagePath = testDir.getAbsolutePath() + "/" + dnId; - reports.add(TestUtils + reports.add(HddsTestUtils .createStorageReport(dnId, storagePath, capacity, used, free, null, failed)); failed = !failed; } - nodeManager.register(dn, TestUtils.createNodeReport(reports, + nodeManager.register(dn, HddsTestUtils.createNodeReport(reports, Collections.emptyList()), null); LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager(); @@ -1451,7 +1445,7 @@ public void testScmNodeReportUpdate() try (SCMNodeManager nodeManager = createNodeManager(conf)) { DatanodeDetails datanodeDetails = - TestUtils.createRandomDatanodeAndRegister(nodeManager); + HddsTestUtils.createRandomDatanodeAndRegister(nodeManager); NodeReportHandler nodeReportHandler = new NodeReportHandler(nodeManager); EventPublisher publisher = mock(EventPublisher.class); final long capacity = 2000; @@ -1461,10 +1455,10 @@ public void testScmNodeReportUpdate() long scmUsed = x * usedPerHeartbeat; long remaining = capacity - scmUsed; String storagePath = testDir.getAbsolutePath() + "/" + dnId; - StorageReportProto report = TestUtils + StorageReportProto report = HddsTestUtils .createStorageReport(dnId, storagePath, capacity, scmUsed, remaining, null); - NodeReportProto nodeReportProto = TestUtils.createNodeReport( + NodeReportProto nodeReportProto = HddsTestUtils.createNodeReport( Arrays.asList(report), Collections.emptyList()); nodeReportHandler.onMessage( new NodeReportFromDatanode(datanodeDetails, nodeReportProto), @@ -1589,16 +1583,16 @@ public void testHandlingSCMCommandEvent() UUID dnId = datanodeDetails.getUuid(); String storagePath = testDir.getAbsolutePath() + "/" + dnId; StorageReportProto report = - TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null); + HddsTestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null); EventQueue eq = new EventQueue(); try (SCMNodeManager nodemanager = createNodeManager(conf)) { eq.addHandler(DATANODE_COMMAND, nodemanager); nodemanager - .register(datanodeDetails, TestUtils.createNodeReport( + .register(datanodeDetails, HddsTestUtils.createNodeReport( Arrays.asList(report), Collections.emptyList()), - TestUtils.getRandomPipelineReports()); + HddsTestUtils.getRandomPipelineReports()); eq.fireEvent(DATANODE_COMMAND, new CommandForDatanode<>(datanodeDetails.getUuid(), new CloseContainerCommand(1L, @@ -1768,7 
+1762,7 @@ public void testGetNodeInfo() final int nodeCount = 6; SCMNodeManager nodeManager = createNodeManager(conf); - for (int i=0; i(datanodes.size()), nodesRequired, 0, 10 * OzoneConsts.TB); Assert.fail("SCMException should have been thrown."); - } catch(SCMException ex) { + } catch (SCMException ex) { Assert.assertTrue(ex.getMessage().contains(expectedMessageSubstring)); } @@ -260,13 +260,13 @@ public void testChooseNodeNotEnoughSpace() throws IOException { new ArrayList<>(datanodes.size()), nodesRequired, 10 * OzoneConsts.TB, 0); Assert.fail("SCMException should have been thrown."); - } catch(SCMException ex) { + } catch (SCMException ex) { Assert.assertTrue(ex.getMessage().contains(expectedMessageSubstring)); } } @Test - public void testPickLowestLoadAnchor() throws IOException{ + public void testPickLowestLoadAnchor() throws IOException { List healthyNodes = nodeManager .getNodes(NodeStatus.inServiceHealthy()); @@ -343,7 +343,7 @@ public void testFallBackPickNodes() { } @Test - public void testRackAwarenessNotEnabledWithFallBack() throws SCMException{ + public void testRackAwarenessNotEnabledWithFallBack() throws SCMException { DatanodeDetails anchor = placementPolicy .chooseNode(nodesWithOutRackAwareness); DatanodeDetails randomNode = placementPolicy @@ -425,12 +425,12 @@ private List overWriteLocationInNodes( } @Test - public void testHeavyNodeShouldBeExcluded() throws SCMException{ + public void testHeavyNodeShouldBeExcluded() throws SCMException { List healthyNodes = nodeManager.getNodes(NodeStatus.inServiceHealthy()); int nodesRequired = HddsProtos.ReplicationFactor.THREE.getNumber(); // only minority of healthy NODES are heavily engaged in pipelines. - int minorityHeavy = healthyNodes.size()/2 - 1; + int minorityHeavy = healthyNodes.size() / 2 - 1; List pickedNodes1 = placementPolicy.chooseDatanodes( new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT), new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT), @@ -443,7 +443,7 @@ public void testHeavyNodeShouldBeExcluded() throws SCMException{ Assert.assertTrue(checkDuplicateNodesUUID(pickedNodes1)); // majority of healthy NODES are heavily engaged in pipelines. 
- int majorityHeavy = healthyNodes.size()/2 + 2; + int majorityHeavy = healthyNodes.size() / 2 + 2; insertHeavyNodesIntoNodeManager(healthyNodes, majorityHeavy); boolean thrown = false; List pickedNodes2 = null; @@ -627,7 +627,7 @@ private Set mockPipelineIDs(int count) { } private void insertHeavyNodesIntoNodeManager( - List nodes, int heavyNodeCount) throws SCMException{ + List nodes, int heavyNodeCount) throws SCMException { if (nodes == null) { throw new SCMException("", SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java index d1f383c4462..3d1a707c400 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java @@ -289,7 +289,7 @@ public void testCreatePipelinesWhenNotEnoughSpace() throws Exception { provider.create(new RatisReplicationConfig(factor)); Assert.fail("Expected SCMException for large container size with " + "replication factor " + factor.toString()); - } catch(SCMException ex) { + } catch (SCMException ex) { Assert.assertTrue(ex.getMessage().contains(expectedErrorSubstring)); } } @@ -302,7 +302,7 @@ public void testCreatePipelinesWhenNotEnoughSpace() throws Exception { provider.create(new RatisReplicationConfig(factor)); Assert.fail("Expected SCMException for large metadata size with " + "replication factor " + factor.toString()); - } catch(SCMException ex) { + } catch (SCMException ex) { Assert.assertTrue(ex.getMessage().contains(expectedErrorSubstring)); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java index c9840e7b999..83419e67b06 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java @@ -133,7 +133,7 @@ public void testOneReplicaPipelineRule() throws Exception { LoggerFactory.getLogger(SCMSafeModeManager.class)); List pipelines = pipelineManager.getPipelines(); - firePipelineEvent(pipelines.subList(0, pipelineFactorThreeCount -1)); + firePipelineEvent(pipelines.subList(0, pipelineFactorThreeCount - 1)); // As 90% of 7 with ceil is 7, if we send 6 pipeline reports, rule // validate should be still false. @@ -144,7 +144,7 @@ public void testOneReplicaPipelineRule() throws Exception { Assert.assertFalse(rule.validate()); //Fire last pipeline event from datanode. 
- firePipelineEvent(pipelines.subList(pipelineFactorThreeCount -1, + firePipelineEvent(pipelines.subList(pipelineFactorThreeCount - 1, pipelineFactorThreeCount)); GenericTestUtils.waitFor(() -> rule.validate(), 1000, 5000); @@ -181,13 +181,13 @@ public void testOneReplicaPipelineRuleMixedPipelines() throws Exception { pipelineManager.getPipelines( new RatisReplicationConfig(ReplicationFactor.THREE)); - firePipelineEvent(pipelines.subList(0, pipelineCountThree -1)); + firePipelineEvent(pipelines.subList(0, pipelineCountThree - 1)); GenericTestUtils.waitFor(() -> logCapturer.getOutput().contains( "reported count is 6"), 1000, 5000); //Fire last pipeline event from datanode. - firePipelineEvent(pipelines.subList(pipelineCountThree -1, + firePipelineEvent(pipelines.subList(pipelineCountThree - 1, pipelineCountThree)); GenericTestUtils.waitFor(() -> rule.validate(), 1000, 5000); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index ef6345ef0ec..cb200f2ea84 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -435,7 +435,7 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( }, 100, 1000 * 5); } - private void checkHealthy(int expectedCount) throws Exception{ + private void checkHealthy(int expectedCount) throws Exception { GenericTestUtils.waitFor(() -> scmSafeModeManager .getHealthyPipelineSafeModeRule() .getCurrentHealthyPipelineCount() == expectedCount, @@ -548,14 +548,14 @@ private void testSafeModeDataNodes(int numOfDns) throws Exception { assertTrue(scmSafeModeManager.getInSafeMode()); // Register all DataNodes except last one and assert SCM is in safe mode. 
- for (int i = 0; i < numOfDns-1; i++) { + for (int i = 0; i < numOfDns - 1; i++) { queue.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT, HddsTestUtils.createNodeRegistrationContainerReport(containers)); assertTrue(scmSafeModeManager.getInSafeMode()); assertTrue(scmSafeModeManager.getCurrentContainerThreshold() == 1); } - if(numOfDns == 0){ + if (numOfDns == 0) { GenericTestUtils.waitFor(() -> { return scmSafeModeManager.getInSafeMode(); }, 10, 1000 * 10); @@ -586,7 +586,7 @@ public void testSafeModePipelineExitRule() throws Exception { containers.addAll(HddsTestUtils.getContainerInfo(25 * 4)); String storageDir = GenericTestUtils.getTempPath( TestSCMSafeModeManager.class.getName() + UUID.randomUUID()); - try{ + try { MockNodeManager nodeManager = new MockNodeManager(true, 3); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); // enable pipeline check diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java index dac832d7ed3..5849398a971 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -63,7 +63,7 @@ public void setUp() throws Exception { SCMConfigurator configurator = new SCMConfigurator(); configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); configurator.setScmContext(SCMContext.emptyContext()); - scm = TestUtils.getScm(config, configurator); + scm = HddsTestUtils.getScm(config, configurator); scm.start(); scm.exitSafeMode(); // add nodes to scm node manager diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java index 5ddbe5532f6..7ad118ca9c4 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMCertStore.java @@ -193,7 +193,7 @@ public void testRevokeCertificates() throws Exception { // Generate 3 more certificates and revoke 2 of them List newSerialIDs = new ArrayList<>(); - for (int i = 0; i<3; i++) { + for (int i = 0; i < 3; i++) { X509Certificate cert = generateX509Cert(); scmCertStore.storeValidCertificate(cert.getSerialNumber(), cert, SCM); newSerialIDs.add(cert.getSerialNumber()); @@ -250,7 +250,7 @@ public void testRevokeCertificatesForFutureTime() throws Exception { scmCertStore.storeValidCertificate(serialID, x509Certificate, SCM); Date now = new Date(); // Set revocation time in the future - Date revocationTime = new Date(now.getTime()+500); + Date revocationTime = new Date(now.getTime() + 500); X509CertificateHolder caCertificateHolder = new X509CertificateHolder(generateX509Cert().getEncoded()); @@ -282,7 +282,7 @@ private X509Certificate generateX509Cert() throws Exception { private 
long getTableSize(Iterator iterator) { long size = 0; - while(iterator.hasNext()) { + while (iterator.hasNext()) { size++; iterator.next(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java index 47495c91ee1..e7b2c575439 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/update/server/TestSCMUpdateServiceGrpcServer.java @@ -57,7 +57,7 @@ public class TestSCMUpdateServiceGrpcServer { public Timeout timeout = Timeout.seconds(300); @Rule - public ExpectedException thrown= ExpectedException.none(); + public ExpectedException thrown = ExpectedException.none(); @Rule public final TemporaryFolder tempDir = new TemporaryFolder(); @@ -129,13 +129,13 @@ public void testClientUpdateWithRevoke() throws Exception { } server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()==4, 100, 2000); + GenericTestUtils.waitFor(() -> client.getUpdateCount() == 4, 100, 2000); Assert.assertEquals(4, client.getUpdateCount()); Assert.assertEquals(0, client.getErrorCount()); revokeCertNow(certIds.get(5)); server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()>4, 100, 2000); + GenericTestUtils.waitFor(() -> client.getUpdateCount() > 4, 100, 2000); Assert.assertEquals(5, client.getUpdateCount()); Assert.assertEquals(0, client.getErrorCount()); } catch (Exception e) { @@ -178,7 +178,7 @@ public void testClientUpdateWithDelayedRevoke() throws Exception { revokeCertNow((certIds.get(0))); server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()==1, + GenericTestUtils.waitFor(() -> client.getUpdateCount() == 1, 100, 2000); Assert.assertEquals(1, client.getUpdateCount()); Assert.assertEquals(0, client.getErrorCount()); @@ -186,14 +186,14 @@ public void testClientUpdateWithDelayedRevoke() throws Exception { // revoke cert 5 with 10 seconds delay revokeCert(certIds.get(5), Instant.now().plus(Duration.ofSeconds(5))); server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()>1, + GenericTestUtils.waitFor(() -> client.getUpdateCount() > 1, 100, 2000); Assert.assertTrue(2 <= client.getUpdateCount()); Assert.assertEquals(0, client.getErrorCount()); Assert.assertTrue(1 >= client.getClientCRLStore() .getPendingCrlIds().size()); - GenericTestUtils.waitFor(() -> client.getPendingCrlRemoveCount()==1, + GenericTestUtils.waitFor(() -> client.getPendingCrlRemoveCount() == 1, 100, 20_000); Assert.assertTrue(client.getClientCRLStore() .getPendingCrlIds().isEmpty()); @@ -243,7 +243,7 @@ public void testClientUpdateWithRestart() throws Exception { revokeCertNow((certIds.get(i))); } server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()==4, + GenericTestUtils.waitFor(() -> client.getUpdateCount() == 4, 100, 2000); Assert.assertEquals(4, client.getUpdateCount()); @@ -257,7 +257,7 @@ public void testClientUpdateWithRestart() throws Exception { // client retry connect to the server. The client will handle that. 
server.stop(); server.start(); - GenericTestUtils.waitFor(() -> client.getErrorCount()==1, + GenericTestUtils.waitFor(() -> client.getErrorCount() == 1, 100, 2000); Assert.assertEquals(4, client.getUpdateCount()); Assert.assertEquals(1, client.getErrorCount()); @@ -266,7 +266,7 @@ public void testClientUpdateWithRestart() throws Exception { revokeCertNow(certIds.get(5)); server.notifyCrlUpdate(); - GenericTestUtils.waitFor(() -> client.getUpdateCount()>4, + GenericTestUtils.waitFor(() -> client.getUpdateCount() > 4, 100, 5000); Assert.assertEquals(5, client.getUpdateCount()); Assert.assertEquals(1, client.getErrorCount()); @@ -282,16 +282,16 @@ public void testClientUpdateWithRestart() throws Exception { client.createChannel(); client.start(); Assert.assertEquals(5, clientCRLStore.getLatestCrlId()); - GenericTestUtils.waitFor(() -> client.getUpdateCount()>5, + GenericTestUtils.waitFor(() -> client.getUpdateCount() > 5, 100, 2000); revokeCertNow(certIds.get(6)); // mostly noop server.notifyCrlUpdate(); LOG.info("Test client restart end."); - GenericTestUtils.waitFor(() -> client.getUpdateCount()>6, + GenericTestUtils.waitFor(() -> client.getUpdateCount() > 6, 100, 2000); - Assert.assertTrue(client.getUpdateCount()>=6); + Assert.assertTrue(client.getUpdateCount() >= 6); Assert.assertEquals(2, client.getErrorCount()); Assert.assertEquals(6, clientCRLStore.getLatestCrlId()); } catch (Exception e) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java index 84038bd2b54..17c6dee1f47 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -105,7 +105,8 @@ public void testUpgrade() throws Exception { // Set up new pre-finalized SCM. 
conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, haEnabledPreFinalized); - StorageContainerManager scm = TestUtils.getScm(conf); + StorageContainerManager scm = HddsTestUtils.getScm(conf); + Assert.assertEquals(UpgradeFinalizer.Status.FINALIZATION_REQUIRED, scm.getUpgradeFinalizer().getStatus()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java index ee6608e5201..762f946d83f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestScmStartupSlvLessThanMlv.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.ozone.upgrade.LayoutFeature; -import org.apache.hadoop.ozone.upgrade.TestUpgradeUtils; +import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; import org.junit.Rule; @@ -61,12 +61,12 @@ public void testStartupSlvLessThanMlv() throws Exception { // Create version file with MLV > SLV, which should fail the SCM // construction. - TestUpgradeUtils.createVersionFile(scmSubdir, HddsProtos.NodeType.SCM, mlv); + UpgradeTestUtils.createVersionFile(scmSubdir, HddsProtos.NodeType.SCM, mlv); try { new StorageContainerManager(conf); Assert.fail("Expected IOException due to incorrect MLV on SCM creation."); - } catch(IOException e) { + } catch (IOException e) { String expectedMessage = String.format("Metadata layout version (%s) > " + "software layout version (%s)", mlv, largestSlv); GenericTestUtils.assertExceptionContains(expectedMessage, e); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index dc11ecef98c..c0ff6466936 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.VersionInfo; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; @@ -275,14 +275,14 @@ public void testRegister() throws Exception { try (EndpointStateMachine rpcEndPoint = createEndpoint( SCMTestUtils.getConf(), serverAddress, 1000)) { SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint() - .register(nodeToRegister.getExtendedProtoBufMessage(), TestUtils + .register(nodeToRegister.getExtendedProtoBufMessage(), HddsTestUtils .createNodeReport( Arrays.asList(getStorageReports( nodeToRegister.getUuid())), Arrays.asList(getMetadataStorageReports( nodeToRegister.getUuid()))), - TestUtils.getRandomContainerReports(10), - TestUtils.getRandomPipelineReports(), + 
HddsTestUtils.getRandomContainerReports(10), + HddsTestUtils.getRandomPipelineReports(), defaultLayoutVersionProto()); Assert.assertNotNull(responseProto); Assert.assertEquals(nodeToRegister.getUuidString(), @@ -296,12 +296,13 @@ public void testRegister() throws Exception { private StorageReportProto getStorageReports(UUID id) { String storagePath = testDir.getAbsolutePath() + "/data-" + id; - return TestUtils.createStorageReport(id, storagePath, 100, 10, 90, null); + return HddsTestUtils.createStorageReport(id, storagePath, 100, 10, 90, + null); } private MetadataStorageReportProto getMetadataStorageReports(UUID id) { String storagePath = testDir.getAbsolutePath() + "/metadata-" + id; - return TestUtils.createMetadataStorageReport(storagePath, 100, 10, 90, + return HddsTestUtils.createMetadataStorageReport(storagePath, 100, 10, 90, null); } @@ -315,15 +316,15 @@ private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress, rpcEndPoint.setState(EndpointStateMachine.EndPointStates.REGISTER); OzoneContainer ozoneContainer = mock(OzoneContainer.class); UUID datanodeID = UUID.randomUUID(); - when(ozoneContainer.getNodeReport()).thenReturn(TestUtils + when(ozoneContainer.getNodeReport()).thenReturn(HddsTestUtils .createNodeReport(Arrays.asList(getStorageReports(datanodeID)), Arrays.asList(getMetadataStorageReports(datanodeID)))); ContainerController controller = Mockito.mock(ContainerController.class); when(controller.getContainerReport()).thenReturn( - TestUtils.getRandomContainerReports(10)); + HddsTestUtils.getRandomContainerReports(10)); when(ozoneContainer.getController()).thenReturn(controller); when(ozoneContainer.getPipelineReport()).thenReturn( - TestUtils.getRandomPipelineReports()); + HddsTestUtils.getRandomPipelineReports()); HDDSLayoutVersionManager versionManager = Mockito.mock(HDDSLayoutVersionManager.class); when(versionManager.getMetadataLayoutVersion()) @@ -393,7 +394,7 @@ public void testHeartbeat() throws Exception { serverAddress, 1000)) { SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() .setDatanodeDetails(dataNode.getProtoBufMessage()) - .setNodeReport(TestUtils.createNodeReport( + .setNodeReport(HddsTestUtils.createNodeReport( Arrays.asList(getStorageReports(dataNode.getUuid())), Arrays.asList(getMetadataStorageReports(dataNode.getUuid())))) .build(); @@ -416,7 +417,7 @@ public void testHeartbeatWithCommandStatusReport() throws Exception { SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder() .setDatanodeDetails(dataNode.getProtoBufMessage()) - .setNodeReport(TestUtils.createNodeReport( + .setNodeReport(HddsTestUtils.createNodeReport( Arrays.asList(getStorageReports(dataNode.getUuid())), Arrays.asList(getMetadataStorageReports(dataNode.getUuid())))) .build(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index 33fb3557bbb..4db75d2052e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -183,7 +183,7 @@ public List getMostOrLeastUsedDatanodes(boolean mostUsed) { * @return DatanodeUsageInfo of the specified datanode */ @Override - public DatanodeUsageInfo getUsageInfo(DatanodeDetails dn){ + public DatanodeUsageInfo 
getUsageInfo(DatanodeDetails dn) { return null; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java index ef79f7c6d13..f9391c7dd85 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java @@ -31,7 +31,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.node.SCMNodeManager; @@ -149,9 +149,8 @@ public void testNodeReportProcessing() throws InterruptedException { long nrProcessed = getCounter("NumNodeReportProcessed"); StorageReportProto storageReport = - TestUtils.createStorageReport(registeredDatanode.getUuid(), "/tmp", 100, - 10, 90, - null); + HddsTestUtils.createStorageReport(registeredDatanode.getUuid(), "/tmp", + 100, 10, 90, null); NodeReportProto nodeReport = NodeReportProto.newBuilder() .addStorageReport(storageReport).build(); @@ -170,7 +169,7 @@ public void testNodeReportProcessingFailure() { DatanodeDetails randomDatanode = MockDatanodeDetails.randomDatanodeDetails(); - StorageReportProto storageReport = TestUtils.createStorageReport( + StorageReportProto storageReport = HddsTestUtils.createStorageReport( randomDatanode.getUuid(), "/tmp", 100, 10, 90, null); NodeReportProto nodeReport = NodeReportProto.newBuilder() @@ -188,7 +187,7 @@ public void testNodeReportProcessingFailure() { @Test public void testNodeCountAndInfoMetricsReported() throws Exception { - StorageReportProto storageReport = TestUtils.createStorageReport( + StorageReportProto storageReport = HddsTestUtils.createStorageReport( registeredDatanode.getUuid(), "/tmp", 100, 10, 90, null); NodeReportProto nodeReport = NodeReportProto.newBuilder() .addStorageReport(storageReport).build(); diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java index a1e47407e2f..61dd5bf1ad3 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/LambdaTestUtils.java @@ -521,7 +521,7 @@ public static E intercept( throws Exception { return intercept(clazz, contained, "Expecting " + clazz.getName() - + (contained != null? (" with text " + contained) : "") + + (contained != null ? 
(" with text " + contained) : "") + " but got ", () -> { eval.call(); @@ -589,7 +589,7 @@ public static void assertOptionalEquals(String message, T expected, Optional actual) { Assert.assertNotNull(message, actual); - Assert.assertTrue(message +" -not present", actual.isPresent()); + Assert.assertTrue(message + " -not present", actual.isPresent()); Assert.assertEquals(message, expected, actual.get()); } diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index 9a3bce46d70..277bff61595 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -83,5 +83,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> mockito-core test + + org.slf4j + slf4j-api + + + org.slf4j + slf4j-log4j12 + ${slf4j.version} + diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java index 30416516068..7f24d843b0f 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java @@ -41,24 +41,23 @@ * To start: * ozone admin containerbalancer start * [ -t/--threshold {@literal }] - * [ -i/--idleiterations {@literal }] - * [ -d/--maxDatanodesRatioToInvolvePerIteration - * {@literal }] + * [ -i/--iterations {@literal }] + * [ -d/--maxDatanodesPercentageToInvolvePerIteration + * {@literal }] * [ -s/--maxSizeToMovePerIterationInGB * {@literal }] * Examples: * ozone admin containerbalancer start * start balancer with default values in the configuration - * ozone admin containerbalancer start -t 0.05 + * ozone admin containerbalancer start -t 5 * start balancer with a threshold of 5% * ozone admin containerbalancer start -i 20 - * start balancer with maximum 20 consecutive idle iterations - * ozone admin containerbalancer start -i 0 + * start balancer with maximum 20 consecutive iterations + * ozone admin containerbalancer start -i -1 * run balancer infinitely with default values in the configuration - * ozone admin containerbalancer start -d 0.4 - * start balancer with the ratio of maximum datanodes to involve in - * balancing in one iteration to the total number of healthy datanodes as - * 0.4 + * ozone admin containerbalancer start -d 40 + * start balancer with maximum 40% of healthy, in-service datanodes + * involved in balancing * ozone admin containerbalancer start -s 10 * start balancer with maximum size of 10GB to move in one iteration * To stop: diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java index a4ee0d2c554..73d8511bb0c 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStartSubcommand.java @@ -36,44 +36,50 @@ public class ContainerBalancerStartSubcommand extends ScmSubcommand { @Option(names = {"-t", "--threshold"}, - description = "Threshold target whether the cluster is balanced") + description = "Percentage deviation from average utilization of " + + "the cluster after which a datanode will be rebalanced (for " + + "example, '10' for 10%).") private Optional threshold; - @Option(names = {"-i", "--idleiterations"}, - description = "Maximum consecutive idle iterations") - private Optional 
idleiterations; + @Option(names = {"-i", "--iterations"}, + description = "Maximum consecutive iterations that" + + " balancer will run for.") + private Optional iterations; - @Option(names = {"-d", "--maxDatanodesRatioToInvolvePerIteration"}, - description = "The ratio of maximum number of datanodes that should be " + - "involved in balancing in one iteration to the total number of " + - "healthy, in service nodes known to container balancer.") - private Optional maxDatanodesRatioToInvolvePerIteration; + @Option(names = {"-d", "--maxDatanodesPercentageToInvolvePerIteration"}, + description = "Max percentage of healthy, in service datanodes " + + "that can be involved in balancing in one iteration (for example, " + + "'20' for 20%).") + private Optional maxDatanodesPercentageToInvolvePerIteration; @Option(names = {"-s", "--maxSizeToMovePerIterationInGB"}, - description = "Maximum size to move per iteration of balancing in GB, " + - "for 10GB it should be set as 10") + description = "Maximum size that can be moved per iteration of " + + "balancing (for example, '500' for 500GB).") private Optional maxSizeToMovePerIterationInGB; - @Option(names = {"-e", "--maxSizeEnteringTarget"}, - description = "the maximum size that can enter a target datanode while " + - "balancing in GB. This is the sum of data from multiple sources.") + @Option(names = {"-e", "--maxSizeEnteringTargetInGB"}, + description = "Maximum size that can enter a target datanode while " + + "balancing. This is the sum of data from multiple sources (for " + + "example, '26' for 26GB).") private Optional maxSizeEnteringTargetInGB; - @Option(names = {"-l", "--maxSizeLeavingSource"}, - description = "maximum size that can leave a source datanode while " + - "balancing in GB, it is the sum of data moving to multiple targets.") + @Option(names = {"-l", "--maxSizeLeavingSourceInGB"}, + description = "Maximum size that can leave a source datanode while " + + "balancing. This is the sum of data moving to multiple targets " + + "(for example, '26' for 26GB).") private Optional maxSizeLeavingSourceInGB; @Override public void execute(ScmClient scmClient) throws IOException { - boolean result = scmClient.startContainerBalancer(threshold, idleiterations, - maxDatanodesRatioToInvolvePerIteration, maxSizeToMovePerIterationInGB, - maxSizeEnteringTargetInGB, maxSizeLeavingSourceInGB); + boolean result = scmClient.startContainerBalancer(threshold, iterations, + maxDatanodesPercentageToInvolvePerIteration, + maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, + maxSizeLeavingSourceInGB); if (result) { - System.out.println("Starting ContainerBalancer Successfully."); + System.out.println("Container Balancer started successfully."); return; } - System.out.println("ContainerBalancer is already running, " + + System.out.println("Container Balancer is already running. 
" + "Please stop it first."); } } \ No newline at end of file diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java index e0cd436bdf0..44e4d4c9c50 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerStatusSubcommand.java @@ -36,7 +36,7 @@ public class ContainerBalancerStatusSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.getContainerBalancerStatus(); - if(execReturn){ + if (execReturn) { System.out.println("ContainerBalancer is Running."); } else { System.out.println("ContainerBalancer is Not Running."); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 30f0ec8d46d..4fbabf9ba76 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -33,6 +33,8 @@ import org.apache.hadoop.hdds.scm.client.ScmClient; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; @@ -45,6 +47,7 @@ import java.io.IOException; import java.security.cert.X509Certificate; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.Optional; @@ -480,6 +483,25 @@ public ContainerWithPipeline getContainerWithPipeline(long containerId) return storageContainerLocationClient.getContainerWithPipeline(containerId); } + /** + * Gets the list of ReplicaInfo known by SCM for a given container. + * @param containerId - The Container ID + * @return List of ContainerReplicaInfo for the container or an empty list + * if none. + * @throws IOException + */ + @Override + public List + getContainerReplicas(long containerId) throws IOException { + List protos + = storageContainerLocationClient.getContainerReplicas(containerId); + List replicas = new ArrayList<>(); + for (HddsProtos.SCMContainerReplicaProto p : protos) { + replicas.add(ContainerReplicaInfo.fromProto(p)); + } + return replicas; + } + /** * Close a container. 
* @@ -550,16 +572,22 @@ public boolean getReplicationManagerStatus() throws IOException { return storageContainerLocationClient.getReplicationManagerStatus(); } + @Override + public ReplicationManagerReport getReplicationManagerReport() + throws IOException { + return storageContainerLocationClient.getReplicationManagerReport(); + } + @Override public boolean startContainerBalancer( - Optional threshold, Optional idleiterations, - Optional maxDatanodesRatioToInvolvePerIteration, + Optional threshold, Optional iterations, + Optional maxDatanodesPercentageToInvolvePerIteration, Optional maxSizeToMovePerIterationInGB, Optional maxSizeEnteringTargetInGB, Optional maxSizeLeavingSourceInGB) throws IOException { return storageContainerLocationClient.startContainerBalancer(threshold, - idleiterations, maxDatanodesRatioToInvolvePerIteration, + iterations, maxDatanodesPercentageToInvolvePerIteration, maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, maxSizeLeavingSourceInGB); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java index c6800befd8c..9bc3649dd9f 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerStatusSubcommand.java @@ -43,7 +43,7 @@ public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.getReplicationManagerStatus(); // Output data list - if(execReturn){ + if (execReturn) { LOG.info("ReplicationManager is Running."); } else { LOG.info("ReplicationManager is Not Running."); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java index ba359af1c59..db2f02c5e12 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCheckSubcommand.java @@ -51,7 +51,7 @@ public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.inSafeMode(); // Output data list - if(execReturn){ + if (execReturn) { LOG.info("SCM is in safe mode."); if (verbose) { for (Map.Entry> entry : diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java index 12490c5c2c5..bcf64deb85e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeExitSubcommand.java @@ -42,7 +42,7 @@ public class SafeModeExitSubcommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { boolean execReturn = scmClient.forceExitSafeMode(); - if(execReturn){ + if (execReturn) { LOG.info("SCM exit safe mode successfully."); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index 00d678cff2e..f9dfc3fd00e 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -44,7 +44,8 @@ ListSubcommand.class, InfoSubcommand.class, CreateSubcommand.class, - CloseSubcommand.class + CloseSubcommand.class, + ReportSubcommand.class }) @MetaInfServices(SubcommandWithParent.class) public class ContainerCommands implements Callable, SubcommandWithParent { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java index 65884d24a4d..7b0daa6de75 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.cli.container; import java.io.IOException; +import java.util.List; import java.util.stream.Collectors; import org.apache.hadoop.hdds.cli.GenericParentCommand; @@ -25,10 +26,13 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo; import org.apache.hadoop.hdds.scm.container.common.helpers .ContainerWithPipeline; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.server.JsonUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -67,13 +71,23 @@ public void execute(ScmClient scmClient) throws IOException { final ContainerWithPipeline container = scmClient. getContainerWithPipeline(containerID); Preconditions.checkNotNull(container, "Container cannot be null"); + List replicas = null; + try { + replicas = scmClient.getContainerReplicas(containerID); + } catch (IOException e) { + LOG.error("Unable to retrieve the replica details", e); + } if (json) { - LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(container)); + ContainerWithPipelineAndReplicas wrapper = + new ContainerWithPipelineAndReplicas(container.getContainerInfo(), + container.getPipeline(), replicas); + LOG.info(JsonUtils.toJsonStringWithDefaultPrettyPrinter(wrapper)); } else { // Print container report info. 
LOG.info("Container id: {}", containerID); - boolean verbose = spec.root().userObject() instanceof GenericParentCommand + boolean verbose = spec != null + && spec.root().userObject() instanceof GenericParentCommand && ((GenericParentCommand) spec.root().userObject()).isVerbose(); if (verbose) { LOG.info("Pipeline Info: {}", container.getPipeline()); @@ -87,10 +101,53 @@ public void execute(ScmClient scmClient) throws IOException { InfoSubcommand::buildDatanodeDetails) .collect(Collectors.joining(",\n")); LOG.info("Datanodes: [{}]", machinesStr); + + // Print the replica details if available + if (replicas != null) { + String replicaStr = replicas.stream().map( + InfoSubcommand::buildReplicaDetails) + .collect(Collectors.joining(",\n")); + LOG.info("Replicas: [{}]", replicaStr); + } } } private static String buildDatanodeDetails(DatanodeDetails details) { return details.getUuidString() + "/" + details.getHostName(); } + + private static String buildReplicaDetails(ContainerReplicaInfo replica) { + StringBuilder sb = new StringBuilder(); + sb.append("State: " + replica.getState() + ";"); + sb.append(" Origin: " + replica.getPlaceOfBirth().toString() + ";"); + sb.append(" Location: " + + buildDatanodeDetails(replica.getDatanodeDetails())); + return sb.toString(); + } + + private static class ContainerWithPipelineAndReplicas { + + private ContainerInfo containerInfo; + private Pipeline pipeline; + private List replicas; + + ContainerWithPipelineAndReplicas(ContainerInfo container, Pipeline pipeline, + List replicas) { + this.containerInfo = container; + this.pipeline = pipeline; + this.replicas = replicas; + } + + public ContainerInfo getContainerInfo() { + return containerInfo; + } + + public Pipeline getPipeline() { + return pipeline; + } + + public List getReplicas() { + return replicas; + } + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java new file mode 100644 index 00000000000..89b1a11cba8 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ReportSubcommand.java @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.container; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; +import picocli.CommandLine; + +import java.io.IOException; +import java.time.Instant; +import java.util.List; +import java.util.stream.Collectors; + +/** + * This is the handler to process the container report command. + */ +@CommandLine.Command( + name = "report", + description = "Display the container summary report", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) +public class ReportSubcommand extends ScmSubcommand { + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + @Override + public void execute(ScmClient scmClient) throws IOException { + ReplicationManagerReport report = scmClient.getReplicationManagerReport(); + outputHeader(report.getReportTimeStamp()); + blankLine(); + outputContainerStats(report); + blankLine(); + outputContainerHealthStats(report); + blankLine(); + outputContainerSamples(report); + } + + private void outputHeader(long epochMs) { + Instant reportTime = Instant.ofEpochSecond(epochMs / 1000); + outputHeading("Container Summary Report generated at " + reportTime); + + } + + private void outputContainerStats(ReplicationManagerReport report) { + outputHeading("Container State Summary"); + for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) { + long stat = report.getStat(state); + if (stat != -1) { + output(state + ": " + stat); + } + } + } + + private void outputContainerHealthStats(ReplicationManagerReport report) { + outputHeading("Container Health Summary"); + for (ReplicationManagerReport.HealthState state + : ReplicationManagerReport.HealthState.values()) { + long stat = report.getStat(state); + if (stat != -1) { + output(state + ": " + stat); + } + } + } + + private void outputContainerSamples(ReplicationManagerReport report) { + for (ReplicationManagerReport.HealthState state + : ReplicationManagerReport.HealthState.values()) { + List containers = report.getSample(state); + if (containers.size() > 0) { + output("First " + ReplicationManagerReport.SAMPLE_LIMIT + " " + + state + " containers:"); + output(containers + .stream() + .map(ContainerID::toString) + .collect(Collectors.joining(", "))); + blankLine(); + } + } + } + + private void blankLine() { + System.out.print("\n"); + } + + private void output(String s) { + System.out.println(s); + } + + private void outputHeading(String s) { + output(s); + for (int i = 0; i < s.length(); i++) { + System.out.print("="); + } + System.out.print("\n"); + } +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java index 232cc8de693..23ff9176df9 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java +++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java @@ -52,7 +52,7 @@ public void execute(ScmClient scmClient) throws IOException { String.join("\n", hosts)); if (errors.size() > 0) { for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() +": " + System.err.println("Error: " + error.getHostname() + ": " + error.getError()); } // Throwing the exception will cause a non-zero exit status for the diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java index 6d59e3c71a7..a64c400f66f 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java @@ -45,7 +45,7 @@ public class MaintenanceSubCommand extends ScmSubcommand { private List hosts = new ArrayList<>(); @CommandLine.Option(names = {"--end"}, - description = "Automatically end maintenance after the given hours. "+ + description = "Automatically end maintenance after the given hours. " + "By default, maintenance must be ended manually.") private int endInHours = 0; @@ -58,7 +58,7 @@ public void execute(ScmClient scmClient) throws IOException { String.join("\n", hosts)); if (errors.size() > 0) { for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() +": " + System.err.println("Error: " + error.getHostname() + ": " + error.getError()); } // Throwing the exception will cause a non-zero exit status for the diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java index 94b97dbe3a6..61f7826cf64 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java @@ -53,7 +53,7 @@ public void execute(ScmClient scmClient) throws IOException { String.join("\n", hosts)); if (errors.size() > 0) { for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() +": " + System.err.println("Error: " + error.getHostname() + ": " + error.getError()); } // Throwing the exception will cause a non-zero exit status for the diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java new file mode 100644 index 00000000000..ad43f9e0c20 --- /dev/null +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestInfoSubCommand.java @@ -0,0 +1,249 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.container; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerReplicaInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.log4j.AppenderSkeleton; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.log4j.spi.LoggingEvent; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; +import picocli.CommandLine; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.stream.Collectors; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.Mockito.mock; + +/** + * Tests for InfoSubCommand class. + */ +public class TestInfoSubCommand { + + private ScmClient scmClient; + private InfoSubcommand cmd; + private List datanodes; + private Logger logger; + private TestAppender appender; + + @Before + public void setup() throws IOException { + scmClient = mock(ScmClient.class); + datanodes = createDatanodeDetails(3); + Mockito.when(scmClient.getContainerWithPipeline(anyLong())) + .thenReturn(getContainerWithPipeline()); + + appender = new TestAppender(); + logger = Logger.getLogger( + org.apache.hadoop.hdds.scm.cli.container.InfoSubcommand.class); + logger.addAppender(appender); + } + + @After + public void after() { + logger.removeAppender(appender); + } + + @Test + public void testReplicasIncludedInOutput() throws Exception { + Mockito.when(scmClient.getContainerReplicas(anyLong())) + .thenReturn(getReplicas()); + cmd = new InfoSubcommand(); + CommandLine c = new CommandLine(cmd); + c.parseArgs("1"); + cmd.execute(scmClient); + + // Ensure we have a line for Replicas: + List logs = appender.getLog(); + List replica = logs.stream() + .filter(m -> m.getRenderedMessage().matches("(?s)^Replicas:.*")) + .collect(Collectors.toList()); + Assert.assertEquals(1, replica.size()); + + // Ensure each DN UUID is mentioned in the message: + for (DatanodeDetails dn : datanodes) { + Pattern pattern = Pattern.compile(".*" + dn.getUuid().toString() + ".*", + Pattern.DOTALL); + Matcher matcher = pattern.matcher(replica.get(0).getRenderedMessage()); + Assert.assertTrue(matcher.matches()); + } + } + + @Test + public void testReplicasNotOutputIfError() throws IOException { + Mockito.when(scmClient.getContainerReplicas(anyLong())) + .thenThrow(new IOException("Error getting Replicas")); + cmd = new InfoSubcommand(); + CommandLine c = new CommandLine(cmd); + c.parseArgs("1"); 
+ cmd.execute(scmClient); + + // Ensure we have no lines for Replicas: + List logs = appender.getLog(); + List replica = logs.stream() + .filter(m -> m.getRenderedMessage().matches("(?s)^Replicas:.*")) + .collect(Collectors.toList()); + Assert.assertEquals(0, replica.size()); + + // Ensure we have an error logged: + List error = logs.stream() + .filter(m -> m.getLevel() == Level.ERROR) + .collect(Collectors.toList()); + Assert.assertEquals(1, error.size()); + Assert.assertTrue(error.get(0).getRenderedMessage() + .matches("(?s)^Unable to retrieve the replica details.*")); + } + + @Test + public void testReplicasNotOutputIfErrorWithJson() throws IOException { + Mockito.when(scmClient.getContainerReplicas(anyLong())) + .thenThrow(new IOException("Error getting Replicas")); + cmd = new InfoSubcommand(); + CommandLine c = new CommandLine(cmd); + c.parseArgs("1", "--json"); + cmd.execute(scmClient); + + List logs = appender.getLog(); + Assert.assertEquals(2, logs.size()); + String error = logs.get(0).getRenderedMessage(); + String json = logs.get(1).getRenderedMessage(); + + Assert.assertTrue(error + .matches("(?s)^Unable to retrieve the replica details.*")); + Assert.assertFalse(json.matches("(?s).*replicas.*")); + } + + @Test + public void testReplicasOutputWithJson() throws IOException { + Mockito.when(scmClient.getContainerReplicas(anyLong())) + .thenReturn(getReplicas()); + cmd = new InfoSubcommand(); + CommandLine c = new CommandLine(cmd); + c.parseArgs("1", "--json"); + cmd.execute(scmClient); + + List logs = appender.getLog(); + Assert.assertEquals(1, logs.size()); + + // Ensure each DN UUID is mentioned in the message after replicas: + String json = logs.get(0).getRenderedMessage(); + Assert.assertTrue(json.matches("(?s).*replicas.*")); + for (DatanodeDetails dn : datanodes) { + Pattern pattern = Pattern.compile( + ".*replicas.*" + dn.getUuid().toString() + ".*", Pattern.DOTALL); + Matcher matcher = pattern.matcher(json); + Assert.assertTrue(matcher.matches()); + } + } + + private List getReplicas() { + List replicas = new ArrayList<>(); + for (DatanodeDetails dn : datanodes) { + ContainerReplicaInfo container = new ContainerReplicaInfo.Builder() + .setContainerID(1) + .setBytesUsed(1234) + .setState("CLOSED") + .setPlaceOfBirth(dn.getUuid()) + .setDatanodeDetails(dn) + .setKeyCount(1) + .setSequenceId(1).build(); + replicas.add(container); + } + return replicas; + } + + private ContainerWithPipeline getContainerWithPipeline() { + Pipeline pipeline = new Pipeline.Builder() + .setState(Pipeline.PipelineState.CLOSED) + .setReplicationConfig(new RatisReplicationConfig(THREE)) + .setId(PipelineID.randomId()) + .setNodes(datanodes) + .build(); + + ContainerInfo container = new ContainerInfo.Builder() + .setSequenceId(1) + .setPipelineID(pipeline.getId()) + .setUsedBytes(1234) + .setReplicationConfig(new RatisReplicationConfig(THREE)) + .setNumberOfKeys(1) + .setState(CLOSED) + .build(); + + return new ContainerWithPipeline(container, pipeline); + } + + private List createDatanodeDetails(int count) { + List dns = new ArrayList<>(); + for (int i = 0; i < count; i++) { + HddsProtos.DatanodeDetailsProto dnd = + HddsProtos.DatanodeDetailsProto.newBuilder() + .setHostName("host" + i) + .setIpAddress("1.2.3." 
+ i + 1) + .setNetworkLocation("/default") + .setNetworkName("host" + i) + .addPorts(HddsProtos.Port.newBuilder() + .setName("ratis").setValue(5678).build()) + .setUuid(UUID.randomUUID().toString()) + .build(); + dns.add(DatanodeDetails.getFromProtoBuf(dnd)); + } + return dns; + } + + private static class TestAppender extends AppenderSkeleton { + private final List log = new ArrayList<>(); + + @Override + public boolean requiresLayout() { + return false; + } + + @Override + protected void append(final LoggingEvent loggingEvent) { + log.add(loggingEvent); + } + + @Override + public void close() { + } + + public List getLog() { + return new ArrayList<>(log); + } + } +} diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java new file mode 100644 index 00000000000..be0e2c8fb69 --- /dev/null +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/container/TestReportSubCommand.java @@ -0,0 +1,159 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.container; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.PrintStream; +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.mock; + +/** + * Tests for the Container ReportSubCommand class. 
+ */ +public class TestReportSubCommand { + + private ReportSubcommand cmd; + private static final int SEED = 10; + private final ByteArrayOutputStream outContent = new ByteArrayOutputStream(); + private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); + private final PrintStream originalOut = System.out; + private final PrintStream originalErr = System.err; + private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name(); + + @Before + public void setup() throws UnsupportedEncodingException { + cmd = new ReportSubcommand(); + System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); + } + + @After + public void tearDown() { + System.setOut(originalOut); + System.setErr(originalErr); + } + + @Test + public void testCorrectValuesAppearInEmptyReport() throws IOException { + ScmClient scmClient = mock(ScmClient.class); + Mockito.when(scmClient.getReplicationManagerReport()) + .thenAnswer(invocation -> new ReplicationManagerReport()); + + cmd.execute(scmClient); + + for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) { + Pattern p = Pattern.compile( + "^" + state.toString() + ": 0$", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + } + + for (ReplicationManagerReport.HealthState state : + ReplicationManagerReport.HealthState.values()) { + Pattern p = Pattern.compile( + "^" + state.toString() + ": 0$", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + } + } + + @Test + public void testCorrectValuesAppearInReport() throws IOException { + ScmClient scmClient = mock(ScmClient.class); + Mockito.when(scmClient.getReplicationManagerReport()) + .thenAnswer(invocation -> createReport()); + + cmd.execute(scmClient); + + int counter = SEED; + for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) { + Pattern p = Pattern.compile( + "^" + state.toString() + ": " + counter + "$", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + counter++; + } + + counter = SEED; + for (ReplicationManagerReport.HealthState state : + ReplicationManagerReport.HealthState.values()) { + Pattern p = Pattern.compile( + "^" + state.toString() + ": " + counter + "$", Pattern.MULTILINE); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + + // Check the correct samples are returned + p = Pattern.compile( + "^First 100 " + state + " containers:\n" + + containerList(0, counter) + "$", Pattern.MULTILINE); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); + assertTrue(m.find()); + counter++; + } + } + + private ReplicationManagerReport createReport() { + ReplicationManagerReport report = new ReplicationManagerReport(); + + int counter = SEED; + for (HddsProtos.LifeCycleState state : HddsProtos.LifeCycleState.values()) { + for (int i = 0; i < counter; i++) { + report.increment(state); + } + counter++; + } + + // Add samples + counter = SEED; + for (ReplicationManagerReport.HealthState state + : ReplicationManagerReport.HealthState.values()) { + for (int i = 0; i < counter; i++) { + report.incrementAndSample(state, ContainerID.valueOf(i)); + } + counter++; + } + return report; + } + + private String containerList(int start, int end) { + StringBuilder sb = new StringBuilder(); + for (int i = start; i < end; i++) { + if (i != start) { + sb.append(", "); + } + 
sb.append("#" + i); + } + return sb.toString(); + } + +} diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java index 8ed6acd7b2b..a7ef77996a4 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestContainerBalancerSubCommand.java @@ -119,8 +119,8 @@ public void testContainerBalancerStartSubcommandWhenBalancerIsNotRunning() .thenAnswer(invocation -> true); startCmd.execute(scmClient); - Pattern p = Pattern.compile("^Starting\\sContainerBalancer" + - "\\sSuccessfully."); + Pattern p = Pattern.compile("^Container\\sBalancer\\sstarted" + + "\\ssuccessfully."); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } @@ -134,8 +134,8 @@ public void testContainerBalancerStartSubcommandWhenBalancerIsRunning() .thenAnswer(invocation -> false); startCmd.execute(scmClient); - Pattern p = Pattern.compile("^ContainerBalancer\\sis\\salready\\srunning," + - "\\sPlease\\sstop\\sit\\sfirst."); + Pattern p = Pattern.compile("^Container\\sBalancer\\sis\\salready" + + "\\srunning.\\sPlease\\sstop\\sit\\sfirst."); Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java index 70c74a9b98a..69b0efbda1c 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java @@ -90,7 +90,7 @@ public void testDataNodeOperationalStateAndHealthIncludedInOutput() assertTrue(m.find()); for (HddsProtos.NodeState state : HddsProtos.NodeState.values()) { p = Pattern.compile( - "^Health State:\\s+"+state+"$", Pattern.MULTILINE); + "^Health State:\\s+" + state + "$", Pattern.MULTILINE); m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } @@ -106,11 +106,11 @@ public void testDataNodeOperationalStateAndHealthIncludedInOutput() private List getNodeDetails() { List nodes = new ArrayList<>(); - for (int i=0; i<4; i++) { + for (int i = 0; i < 4; i++) { HddsProtos.DatanodeDetailsProto.Builder dnd = HddsProtos.DatanodeDetailsProto.newBuilder(); dnd.setHostName("host" + i); - dnd.setIpAddress("1.2.3." + i+1); + dnd.setIpAddress("1.2.3." 
+ i + 1); dnd.setNetworkLocation("/default"); dnd.setNetworkName("host" + i); dnd.addPorts(HddsProtos.Port.newBuilder() diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index f7cbd5597a5..b5ba8e7e537 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -51,6 +51,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j slf4j-log4j12 + + + org.mockito + mockito-core test diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index 1f819ac91f9..2412b889fab 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -207,7 +207,7 @@ public Iterator listVolumes(String volumePrefix, public Iterator listVolumesByUser(String user, String volumePrefix, String prevVolume) throws IOException { - if(Strings.isNullOrEmpty(user)) { + if (Strings.isNullOrEmpty(user)) { user = UserGroupInformation.getCurrentUser().getUserName(); } return new VolumeIterator(user, volumePrefix, prevVolume); @@ -269,7 +269,7 @@ public boolean hasNext() { @Override public OzoneVolume next() { - if(hasNext()) { + if (hasNext()) { currentValue = currentIterator.next(); return currentValue; } @@ -284,7 +284,7 @@ public OzoneVolume next() { private List getNextListOfVolumes(String prevVolume) { try { //if user is null, we do list of all volumes. - if(user != null) { + if (user != null) { return proxy.listVolumes(user, volPrefix, prevVolume, listCacheSize); } return proxy.listVolumes(volPrefix, prevVolume, listCacheSize); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index 23cf922918f..a292ae263f7 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -611,7 +611,7 @@ public long getUsedNamespace() { * @return {@code Iterator} */ public Iterator listKeys(String keyPrefix) - throws IOException{ + throws IOException { return listKeys(keyPrefix, null); } @@ -905,6 +905,17 @@ public OzoneMultipartUploadList listMultipartUploads(String prefix) return proxy.listMultipartUploads(volumeName, getName(), prefix); } + /** + * Sets/Changes the owner of this Bucket. + * @param userName new owner + * @throws IOException + */ + public boolean setOwner(String userName) throws IOException { + boolean result = proxy.setBucketOwner(volumeName, name, userName); + this.owner = userName; + return result; + } + /** * An Iterator to iterate over {@link OzoneKey} list. */ @@ -928,7 +939,7 @@ void setKeyPrefix(String keyPrefixPath) { * The returned keys match key prefix. 
* @param keyPrefix */ - KeyIterator(String keyPrefix, String prevKey) throws IOException{ + KeyIterator(String keyPrefix, String prevKey) throws IOException { setKeyPrefix(keyPrefix); this.currentValue = null; this.currentIterator = getNextListOfKeys(prevKey).iterator(); @@ -936,7 +947,7 @@ void setKeyPrefix(String keyPrefixPath) { @Override public boolean hasNext() { - if(!currentIterator.hasNext() && currentValue != null) { + if (!currentIterator.hasNext() && currentValue != null) { try { currentIterator = getNextListOfKeys(currentValue.getName()).iterator(); @@ -949,7 +960,7 @@ public boolean hasNext() { @Override public OzoneKey next() { - if(hasNext()) { + if (hasNext()) { currentValue = currentIterator.next(); return currentValue; } @@ -997,7 +1008,7 @@ List getNextListOfKeys(String prevKey) throws * * Note: Does not guarantee to return the list of keys in a sorted order. */ - private class KeyIteratorWithFSO extends KeyIterator{ + private class KeyIteratorWithFSO extends KeyIterator { private Stack stack; private List pendingItemsToBeBatched; diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java index 9bf3973aeae..2830bb13c04 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java @@ -51,7 +51,7 @@ public final class OzoneClientFactory { /** * Private constructor, class is not meant to be initialized. */ - private OzoneClientFactory(){} + private OzoneClientFactory() { } /** diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java index 9326bed5e97..e37969d42a6 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneKey.java @@ -107,7 +107,7 @@ public String getVolumeName() { * * @return bucketName */ - public String getBucketName(){ + public String getBucketName() { return bucketName; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 3847b1214f6..389ccf80011 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -480,7 +480,7 @@ public boolean hasNext() { @Override public OzoneBucket next() { - if(hasNext()) { + if (hasNext()) { currentValue = currentIterator.next(); return currentValue; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/AbstractBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/AbstractBlockChecksumComputer.java new file mode 100644 index 00000000000..4be13e63314 --- /dev/null +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/AbstractBlockChecksumComputer.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.client.checksum; + +import java.io.IOException; +import java.nio.ByteBuffer; + +/** + * Base class for computing block checksum which is a function of chunk + * checksums. + */ +public abstract class AbstractBlockChecksumComputer { + private ByteBuffer outByteBuffer; + + /** + * Compute block checksum. The result can be obtained by getOutBytes(). + * @throws IOException + */ + public abstract void compute() throws IOException; + + public ByteBuffer getOutByteBuffer() { + return outByteBuffer; + } + + public void setOutBytes(byte[] bytes) { + this.outByteBuffer = ByteBuffer.wrap(bytes); + } +} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java new file mode 100644 index 00000000000..ece725f3f26 --- /dev/null +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.client.checksum; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; +import org.apache.hadoop.hdds.scm.XceiverClientFactory; +import org.apache.hadoop.io.DataOutputBuffer; +import org.apache.hadoop.io.MD5Hash; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.client.rpc.RpcClient; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.List; + +/** + * The base class to support file checksum. 
+ */ +public abstract class BaseFileChecksumHelper { + static final Logger LOG = + LoggerFactory.getLogger(BaseFileChecksumHelper.class); + + private OzoneVolume volume; + private OzoneBucket bucket; + private String keyName; + private final long length; + private ClientProtocol rpcClient; + + private XceiverClientFactory xceiverClientFactory; + private final DataOutputBuffer blockChecksumBuf = new DataOutputBuffer(); + private FileChecksum fileChecksum; + private List keyLocationInfos; + private long remaining = 0L; + private int bytesPerCRC = -1; + private long crcPerBlock = 0; + + // initialization + BaseFileChecksumHelper( + OzoneVolume volume, OzoneBucket bucket, String keyName, + long length, ClientProtocol rpcClient) throws IOException { + + this.volume = volume; + this.bucket = bucket; + this.keyName = keyName; + this.length = length; + this.rpcClient = rpcClient; + this.xceiverClientFactory = + ((RpcClient)rpcClient).getXceiverClientManager(); + if (this.length > 0) { + fetchBlocks(); + } + } + + protected String getSrc() { + return "Volume: " + volume.getName() + " Bucket: " + bucket.getName() + " " + + keyName; + } + + protected long getLength() { + return length; + } + + protected ClientProtocol getRpcClient() { + return rpcClient; + } + + protected XceiverClientFactory getXceiverClientFactory() { + return xceiverClientFactory; + } + + protected DataOutputBuffer getBlockChecksumBuf() { + return blockChecksumBuf; + } + + protected List getKeyLocationInfoList() { + return keyLocationInfos; + } + + protected long getRemaining() { + return remaining; + } + + protected void setRemaining(long remaining) { + this.remaining = remaining; + } + + int getBytesPerCRC() { + return bytesPerCRC; + } + + protected void setBytesPerCRC(int bytesPerCRC) { + this.bytesPerCRC = bytesPerCRC; + } + + /** + * Request the blocks created in the most recent version from Ozone Manager. + * + * @throws IOException + */ + private void fetchBlocks() throws IOException { + OzoneManagerProtocol ozoneManagerClient = + getRpcClient().getOzoneManagerClient(); + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volume.getName()) + .setBucketName(bucket.getName()) + .setKeyName(keyName) + .setRefreshPipeline(true) + .setSortDatanodesInPipeline(true) + .setLatestVersionLocation(true) + .build(); + OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); + + // use OmKeyArgs to call Om.lookup() and get OmKeyInfo + keyLocationInfos = keyInfo + .getLatestVersionLocations().getBlocksLatestVersionOnly(); + } + + /** + * Compute file checksum given the list of chunk checksums requested earlier. + * @throws IOException + */ + public void compute() throws IOException { + /** + * request length is 0 or the file is empty, return one with the + * magic entry that matches the md5 of a 32 byte zero-padded byte array. + */ + if (keyLocationInfos == null || keyLocationInfos.isEmpty()) { + // Explicitly specified here in case the default DataOutputBuffer + // buffer length value is changed in future. + final int lenOfZeroBytes = 32; + byte[] emptyBlockMd5 = new byte[lenOfZeroBytes]; + MD5Hash fileMD5 = MD5Hash.digest(emptyBlockMd5); + fileChecksum = new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5); + } else { + checksumBlocks(); + fileChecksum = makeFinalResult(); + } + } + + @VisibleForTesting + List getKeyLocationInfos() { + return keyLocationInfos; + } + + + /** + * Compute block checksums block by block and append the raw bytes of the + * block checksums into getBlockChecksumBuf(). 
+ * + * @throws IOException + */ + protected abstract void checksumBlocks() throws IOException; + + /** + * Make final file checksum result given the per-block or per-block-group + * checksums collected into getBlockChecksumBuf(). + */ + private FileChecksum makeFinalResult() throws IOException { + // TODO: support composite CRC + return makeMd5CrcResult(); + } + + private FileChecksum makeMd5CrcResult() { + // TODO: support CRC32C + //compute file MD5 + final MD5Hash fileMD5 = MD5Hash.digest(getBlockChecksumBuf().getData()); + // assume CRC32 for now + return new MD5MD5CRC32GzipFileChecksum(getBytesPerCRC(), + crcPerBlock, fileMD5); + } + + public FileChecksum getFileChecksum() { + return fileChecksum; + } + +} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java new file mode 100644 index 00000000000..2d0e198dbca --- /dev/null +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedBlockChecksumComputer.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.client.checksum; + +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.io.MD5Hash; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.List; + +/** + * The implementation of AbstractBlockChecksumComputer for replicated blocks. 
+ */ +public class ReplicatedBlockChecksumComputer extends + AbstractBlockChecksumComputer { + + private static final Logger LOG = + LoggerFactory.getLogger(ReplicatedBlockChecksumComputer.class); + + private List chunkInfoList; + + public ReplicatedBlockChecksumComputer( + List chunkInfoList) + throws IOException { + this.chunkInfoList = chunkInfoList; + } + + @Override + public void compute() throws IOException { + computeMd5Crc(); + } + + // compute the block checksum, which is the md5 of chunk checksums + private void computeMd5Crc() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + + for (ContainerProtos.ChunkInfo chunkInfo : chunkInfoList) { + ContainerProtos.ChecksumData checksumData = + chunkInfo.getChecksumData(); + List checksums = checksumData.getChecksumsList(); + + for (ByteString checksum : checksums) { + baos.write(checksum.toByteArray()); + } + } + + MD5Hash fileMD5 = MD5Hash.digest(baos.toByteArray()); + setOutBytes(fileMD5.getDigest()); + + LOG.debug("number of chunks={}, md5out={}", + chunkInfoList.size(), fileMD5); + } +} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java new file mode 100644 index 00000000000..5b79eb870ff --- /dev/null +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java @@ -0,0 +1,187 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.client.checksum; + +import org.apache.hadoop.fs.PathIOException; +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.XceiverClientSpi; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; +import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; +import org.apache.hadoop.io.MD5Hash; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.rpc.RpcClient; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.security.token.Token; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; + +/** + * The helper class to compute file checksum for replicated files. 
+ */ +public class ReplicatedFileChecksumHelper extends BaseFileChecksumHelper { + private int blockIdx; + + ReplicatedFileChecksumHelper( + OzoneVolume volume, OzoneBucket bucket, String keyName, long length, + RpcClient rpcClient) throws IOException { + super(volume, bucket, keyName, length, rpcClient); + } + + @Override + protected void checksumBlocks() throws IOException { + long currentLength = 0; + for (blockIdx = 0; + blockIdx < getKeyLocationInfoList().size() && getRemaining() >= 0; + blockIdx++) { + OmKeyLocationInfo keyLocationInfo = + getKeyLocationInfoList().get(blockIdx); + currentLength += keyLocationInfo.getLength(); + if (currentLength > getLength()) { + return; + } + + if (!checksumBlock(keyLocationInfo)) { + throw new PathIOException( + getSrc(), "Fail to get block MD5 for " + keyLocationInfo); + } + } + } + + /** + * Return true when sounds good to continue or retry, false when severe + * condition or totally failed. + */ + private boolean checksumBlock(OmKeyLocationInfo keyLocationInfo) + throws IOException { + + long blockNumBytes = keyLocationInfo.getLength(); + + if (getRemaining() < blockNumBytes) { + blockNumBytes = getRemaining(); + } + setRemaining(getRemaining() - blockNumBytes); + // for each block, send request + List chunkInfos = + getChunkInfos(keyLocationInfo); + ContainerProtos.ChecksumData checksumData = + chunkInfos.get(0).getChecksumData(); + int bytesPerChecksum = checksumData.getBytesPerChecksum(); + setBytesPerCRC(bytesPerChecksum); + + ByteBuffer blockChecksumByteBuffer = getBlockChecksumFromChunkChecksums( + keyLocationInfo, chunkInfos); + String blockChecksumForDebug = + populateBlockChecksumBuf(blockChecksumByteBuffer); + + LOG.debug("got reply from pipeline {} for block {}: blockChecksum={}, " + + "blockChecksumType={}", + keyLocationInfo.getPipeline(), keyLocationInfo.getBlockID(), + blockChecksumForDebug, checksumData.getType()); + return true; + } + + // copied from BlockInputStream + /** + * Send RPC call to get the block info from the container. + * @return List of chunks in this block. + */ + protected List getChunkInfos( + OmKeyLocationInfo keyLocationInfo) throws IOException { + // irrespective of the container state, we will always read via Standalone + // protocol. 
+ Token token = keyLocationInfo.getToken(); + Pipeline pipeline = keyLocationInfo.getPipeline(); + BlockID blockID = keyLocationInfo.getBlockID(); + if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) { + pipeline = Pipeline.newBuilder(pipeline) + .setReplicationConfig(new StandaloneReplicationConfig( + ReplicationConfig + .getLegacyFactor(pipeline.getReplicationConfig()))) + .build(); + } + + boolean success = false; + List chunks; + XceiverClientSpi xceiverClientSpi = null; + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Initializing BlockInputStream for get key to access {}", + blockID.getContainerID()); + } + xceiverClientSpi = + getXceiverClientFactory().acquireClientForReadData(pipeline); + + ContainerProtos.DatanodeBlockID datanodeBlockID = blockID + .getDatanodeBlockIDProtobuf(); + ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls + .getBlock(xceiverClientSpi, datanodeBlockID, token); + + chunks = response.getBlockData().getChunksList(); + success = true; + } finally { + if (!success && xceiverClientSpi != null) { + getXceiverClientFactory().releaseClientForReadData( + xceiverClientSpi, false); + } + } + + return chunks; + } + + // TODO: copy BlockChecksumHelper here + ByteBuffer getBlockChecksumFromChunkChecksums( + OmKeyLocationInfo keyLocationInfo, + List chunkInfoList) + throws IOException { + AbstractBlockChecksumComputer blockChecksumComputer = + new ReplicatedBlockChecksumComputer(chunkInfoList); + // TODO: support composite CRC + blockChecksumComputer.compute(); + + return blockChecksumComputer.getOutByteBuffer(); + } + + /** + * Parses out the raw blockChecksum bytes from {@code checksumData} byte + * buffer according to the blockChecksumType and populates the cumulative + * blockChecksumBuf with it. + * + * @return a debug-string representation of the parsed checksum if + * debug is enabled, otherwise null. + */ + String populateBlockChecksumBuf(ByteBuffer checksumData) + throws IOException { + String blockChecksumForDebug = null; + //read md5 + final MD5Hash md5 = new MD5Hash(checksumData.array()); + md5.write(getBlockChecksumBuf()); + if (LOG.isDebugEnabled()) { + blockChecksumForDebug = md5.toString(); + } + + return blockChecksumForDebug; + } +} diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/package-info.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/package-info.java new file mode 100644 index 00000000000..9345072f63f --- /dev/null +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.client.checksum; + +/** + * This package contains Ozone Client classes. 
+ */ \ No newline at end of file diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java index 45e84735597..5aff6856bd4 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java @@ -248,7 +248,7 @@ void incCurrentPosition(long len) { /** * Increases current position by one. Used in writes. */ - void incCurrentPosition(){ + void incCurrentPosition() { currentPosition++; } @@ -280,7 +280,7 @@ void updateBlockID(BlockID id) { this.blockID = id; } - OzoneClientConfig getConf(){ + OzoneClientConfig getConf() { return this.config; } @@ -305,7 +305,7 @@ public Pipeline getPipeline() { * OMKeyLocationInfo. * @return */ - Pipeline getPipelineForOMLocationReport(){ + Pipeline getPipelineForOMLocationReport() { return getPipeline(); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java index e84e39abeb7..f440cf93988 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyInputStream.java @@ -351,14 +351,14 @@ public boolean seekToNewSource(long targetPos) throws IOException { } @Override - public int available() throws IOException { + public synchronized int available() throws IOException { checkOpen(); long remaining = length - getPos(); return remaining <= Integer.MAX_VALUE ? (int) remaining : Integer.MAX_VALUE; } @Override - public void close() throws IOException { + public synchronized void close() throws IOException { closed = true; for (BlockInputStream blockStream : blockStreams) { blockStream.close(); @@ -388,7 +388,7 @@ public long getRemainingOfIndex(int index) throws IOException { } @Override - public long skip(long n) throws IOException { + public synchronized long skip(long n) throws IOException { if (n <= 0) { return 0; } @@ -399,7 +399,7 @@ public long skip(long n) throws IOException { } @Override - public void unbuffer() { + public synchronized void unbuffer() { for (BlockInputStream is : blockStreams) { is.unbuffer(); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java index 8d6ea10e412..8502fec3c52 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.io.Text; import org.apache.hadoop.ozone.OzoneAcl; @@ -42,6 +43,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import 
org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; @@ -772,4 +774,29 @@ OzoneKey headObject(String volumeName, String bucketName, * Clears the S3 Authentication information attached to the thread. */ void clearTheadLocalS3Auth(); + + /** + * Sets the owner of bucket. + * @param volumeName Name of the Volume + * @param bucketName Name of the Bucket + * @param owner to be set for the bucket + * @throws IOException + */ + boolean setBucketOwner(String volumeName, String bucketName, + String owner) throws IOException; + + /** + * Reads every replica for all the blocks associated with a given key. + * @param volumeName Volume name. + * @param bucketName Bucket name. + * @param keyName Key name. + * @return For every OmKeyLocationInfo (represents a block) it is mapped + * every replica, which is constructed by the DatanodeDetails and an + * inputstream made from the block. + * @throws IOException + */ + Map> getKeysEveryReplicas( + String volumeName, String bucketName, String keyName) + throws IOException; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java index eecc73bc3d9..470f6959787 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java @@ -80,7 +80,7 @@ public static KeyProvider.KeyVersion decryptEncryptedDataEncryptionKey( */ public static Text getKeyProviderMapKey(URI namespaceUri) { return new Text(O3_KMS_PREFIX + namespaceUri.getScheme() - +"://" + namespaceUri.getAuthority()); + + "://" + namespaceUri.getAuthority()); } public static String bytes2String(byte[] bytes) { @@ -131,7 +131,7 @@ public static URI getKeyProviderUri(UserGroupInformation ugi, } public static KeyProvider getKeyProvider(final ConfigurationSource conf, - final URI serverProviderUri) throws IOException{ + final URI serverProviderUri) throws IOException { if (serverProviderUri == null) { throw new IOException("KMS serverProviderUri is not configured."); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 9ca86939bf4..328044184b0 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -31,6 +31,8 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.UUID; @@ -48,6 +50,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; @@ -55,6 +58,8 @@ import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import 
org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.io.Text; @@ -89,6 +94,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; @@ -280,6 +286,10 @@ public void onRemoval( }).build(); } + public XceiverClientFactory getXceiverClientManager() { + return xceiverClientManager; + } + static boolean validateOmVersion(String expectedVersion, List serviceInfoList) { if (expectedVersion == null || expectedVersion.isEmpty()) { @@ -527,7 +537,7 @@ public void createBucket( List listOfAcls = getAclList(); //ACLs from BucketArgs - if(bucketArgs.getAcls() != null) { + if (bucketArgs.getAcls() != null) { listOfAcls.addAll(bucketArgs.getAcls()); } @@ -846,7 +856,7 @@ public OzoneOutputStream createKey( .setAcls(getAclList()) .setLatestVersionLocation(getLatestVersionLocation); if (Boolean.parseBoolean(metadata.get(OzoneConsts.GDPR_FLAG))) { - try{ + try { GDPRSymmetricKey gKey = new GDPRSymmetricKey(new SecureRandom()); builder.addAllMetadata(gKey.getKeyDetails()); } catch (Exception e) { @@ -870,13 +880,17 @@ private KeyProvider.KeyVersion getDEK(FileEncryptionInfo feInfo) OzoneKMSUtil.checkCryptoProtocolVersion(feInfo); KeyProvider.KeyVersion decrypted = null; try { - // Do proxy thing only when current UGI not matching with login UGI - // In this way, proxying is done only for s3g where - // s3g can act as proxy to end user. + + // After HDDS-5881 the user will not be different, + // as S3G uses single RpcClient. So we should be checking thread-local + // S3Auth and use it during proxy. 
UserGroupInformation loginUser = UserGroupInformation.getLoginUser(); - if (!ugi.getShortUserName().equals(loginUser.getShortUserName())) { - UserGroupInformation proxyUser = UserGroupInformation.createProxyUser( - ugi.getShortUserName(), loginUser); + UserGroupInformation proxyUser; + if (getThreadLocalS3Auth() != null) { + UserGroupInformation s3gUGI = UserGroupInformation.createRemoteUser( + getThreadLocalS3Auth().getAccessID()); + proxyUser = UserGroupInformation.createProxyUser( + s3gUGI.getShortUserName(), loginUser); decrypted = proxyUser.doAs( (PrivilegedExceptionAction) () -> { return OzoneKMSUtil.decryptEncryptedDataEncryptionKey(feInfo, @@ -912,6 +926,63 @@ public OzoneInputStream getKey( return getInputStreamWithRetryFunction(keyInfo); } + @Override + public Map< OmKeyLocationInfo, Map > + getKeysEveryReplicas(String volumeName, + String bucketName, + String keyName) throws IOException { + + Map< OmKeyLocationInfo, Map > result + = new LinkedHashMap<>(); + + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setRefreshPipeline(true) + .setSortDatanodesInPipeline(topologyAwareReadEnabled) + .build(); + + OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs); + List keyLocationInfos + = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly(); + + for (OmKeyLocationInfo keyLocationInfo : keyLocationInfos) { + Map blocks = new HashMap<>(); + + Pipeline pipelineBefore = keyLocationInfo.getPipeline(); + List datanodes = pipelineBefore.getNodes(); + + for (DatanodeDetails dn : datanodes) { + List nodes = new ArrayList<>(); + nodes.add(dn); + Pipeline pipeline + = new Pipeline.Builder(pipelineBefore).setNodes(nodes) + .setId(PipelineID.randomId()).build(); + keyLocationInfo.setPipeline(pipeline); + + List keyLocationInfoList = new ArrayList<>(); + keyLocationInfoList.add(keyLocationInfo); + OmKeyLocationInfoGroup keyLocationInfoGroup + = new OmKeyLocationInfoGroup(0, keyLocationInfoList); + List keyLocationInfoGroups = new ArrayList<>(); + keyLocationInfoGroups.add(keyLocationInfoGroup); + + keyInfo.setKeyLocationVersions(keyLocationInfoGroups); + OzoneInputStream is = createInputStream(keyInfo, Function.identity()); + + blocks.put(dn, is); + } + + result.put(keyLocationInfo, blocks); + } + + return result; + } + @Override public void deleteKey( String volumeName, String bucketName, String keyName, boolean recursive) @@ -944,7 +1015,7 @@ public void renameKey(String volumeName, String bucketName, String fromKeyName, String toKeyName) throws IOException { verifyVolumeName(volumeName); verifyBucketName(bucketName); - if(checkKeyNameEnabled){ + if (checkKeyNameEnabled) { HddsClientUtils.verifyKeyName(toKeyName); } HddsClientUtils.checkNotNull(fromKeyName, toKeyName); @@ -1086,13 +1157,13 @@ public OzoneOutputStream createMultipartKey(String volumeName, throws IOException { verifyVolumeName(volumeName); verifyBucketName(bucketName); - if(checkKeyNameEnabled) { + if (checkKeyNameEnabled) { HddsClientUtils.verifyKeyName(keyName); } HddsClientUtils.checkNotNull(keyName, uploadID); - Preconditions.checkArgument(partNumber > 0 && partNumber <=10000, "Part " + + Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000, "Part " + "number should be greater than zero and less than or equal to 10000"); - Preconditions.checkArgument(size >=0, "size should be greater than or " + + 
Preconditions.checkArgument(size >= 0, "size should be greater than or " + "equal to zero"); String requestId = UUID.randomUUID().toString(); OmKeyArgs keyArgs = new OmKeyArgs.Builder() @@ -1437,7 +1508,7 @@ private OzoneInputStream createInputStream( final KeyProvider.KeyVersion decrypted = getDEK(feInfo); List cryptoInputStreams = new ArrayList<>(); - for(LengthInputStream lengthInputStream : lengthInputStreams) { + for (LengthInputStream lengthInputStream : lengthInputStreams) { final OzoneCryptoInputStream ozoneCryptoInputStream = new OzoneCryptoInputStream(lengthInputStream, OzoneKMSUtil.getCryptoCodec(conf, feInfo), @@ -1475,11 +1546,11 @@ private OzoneOutputStream createOutputStream(OpenKeySession openKey, decrypted.getMaterial(), feInfo.getIV()); return new OzoneOutputStream(cryptoOut); } else { - try{ + try { GDPRSymmetricKey gk; Map openKeyMetadata = openKey.getKeyInfo().getMetadata(); - if(Boolean.valueOf(openKeyMetadata.get(OzoneConsts.GDPR_FLAG))){ + if (Boolean.valueOf(openKeyMetadata.get(OzoneConsts.GDPR_FLAG))) { gk = new GDPRSymmetricKey( openKeyMetadata.get(OzoneConsts.GDPR_SECRET), openKeyMetadata.get(OzoneConsts.GDPR_ALGORITHM) @@ -1488,7 +1559,7 @@ private OzoneOutputStream createOutputStream(OpenKeySession openKey, return new OzoneOutputStream( new CipherOutputStream(keyOutputStream, gk.getCipher())); } - }catch (Exception ex){ + } catch (Exception ex) { throw new IOException(ex); } @@ -1575,4 +1646,17 @@ public S3Auth getThreadLocalS3Auth() { public void clearTheadLocalS3Auth() { ozoneManagerClient.clearThreadLocalS3Auth(); } + + @Override + public boolean setBucketOwner(String volumeName, String bucketName, + String owner) throws IOException { + verifyVolumeName(volumeName); + verifyBucketName(bucketName); + Preconditions.checkNotNull(owner); + OmBucketArgs.Builder builder = OmBucketArgs.newBuilder(); + builder.setVolumeName(volumeName) + .setBucketName(bucketName) + .setOwnerName(owner); + return ozoneManagerClient.setBucketOwner(builder.build()); + } } diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java index 712120d223b..6c047ab998a 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java @@ -65,7 +65,7 @@ public class TestHddsClientUtils { public Timeout timeout = Timeout.seconds(300); @Rule - public ExpectedException thrown= ExpectedException.none(); + public ExpectedException thrown = ExpectedException.none(); /** * Verify client endpoint lookup failure if it is not configured. @@ -104,7 +104,7 @@ public void testGetScmClientAddressForHA() { conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); String[] nodes = new String[] {"scm1", "scm2", "scm3"}; - conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY+"."+scmServiceId, + conf.set(ScmConfigKeys.OZONE_SCM_NODES_KEY + "." 
+ scmServiceId, "scm1,scm2,scm3"); conf.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, "scm1"); diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java new file mode 100644 index 00000000000..fba0773d15f --- /dev/null +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedBlockChecksumComputer.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.client.checksum; + +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.io.MD5Hash; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.junit.Test; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.List; + +import static org.junit.Assert.assertArrayEquals; + +/** + * Unit tests for ReplicatedBlockChecksumComputer class. + */ +public class TestReplicatedBlockChecksumComputer { + @Test + public void testComputeMd5Crc() throws IOException { + final int lenOfZeroBytes = 32; + byte[] emptyChunkChecksum = new byte[lenOfZeroBytes]; + MD5Hash emptyBlockMD5 = MD5Hash.digest(emptyChunkChecksum); + byte[] emptyBlockMD5Hash = emptyBlockMD5.getDigest(); + + ByteString checkSum = ByteString.copyFrom(emptyChunkChecksum); + + ContainerProtos.ChecksumData checksumData = + ContainerProtos.ChecksumData.newBuilder() + .addChecksums(checkSum) + .setBytesPerChecksum(4) + .setType(ContainerProtos.ChecksumType.CRC32) + .build(); + ContainerProtos.ChunkInfo chunkInfo = + ContainerProtos.ChunkInfo.newBuilder() + .setChecksumData(checksumData) + .setChunkName("dummy_chunk") + .setOffset(0) + .setLen(lenOfZeroBytes) + .build(); + List chunkInfoList = + Collections.singletonList(chunkInfo); + AbstractBlockChecksumComputer computer = + new ReplicatedBlockChecksumComputer(chunkInfoList); + + computer.compute(); + + ByteBuffer output = computer.getOutByteBuffer(); + assertArrayEquals(emptyBlockMD5Hash, output.array()); + } +} \ No newline at end of file diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java new file mode 100644 index 00000000000..198602dd2d3 --- /dev/null +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/TestReplicatedFileChecksumHelper.java @@ -0,0 +1,323 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.client.checksum; + +import org.apache.hadoop.fs.FileChecksum; +import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.InMemoryConfiguration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.XceiverClientFactory; +import org.apache.hadoop.hdds.scm.XceiverClientGrpc; +import org.apache.hadoop.hdds.scm.XceiverClientReply; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.ozone.client.MockOmTransport; +import org.apache.hadoop.ozone.client.MockXceiverClientFactory; +import org.apache.hadoop.ozone.client.ObjectStore; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.client.rpc.RpcClient; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.om.protocolPB.OmTransport; +import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.Time; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.jetbrains.annotations.NotNull; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentMatchers; +import org.mockito.Mockito; + +import java.io.IOException; +import java.security.cert.X509Certificate; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; + +import static java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType.CRC32; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.when; + +/** + * Unit tests for ReplicatedFileChecksumHelper class. 
+ */ +public class TestReplicatedFileChecksumHelper { + private OzoneClient client; + private ObjectStore store; + private OzoneVolume volume; + private RpcClient rpcClient; + + @Before + public void init() throws IOException { + ConfigurationSource config = new InMemoryConfiguration(); + rpcClient = new RpcClient(config, null) { + + @Override + protected OmTransport createOmTransport( + String omServiceId) + throws IOException { + return new MockOmTransport(); + } + + @NotNull + @Override + protected XceiverClientFactory createXceiverClientFactory( + List x509Certificates) + throws IOException { + return new MockXceiverClientFactory(); + } + }; + client = new OzoneClient(config, rpcClient); + + store = client.getObjectStore(); + } + + @After + public void close() throws IOException { + client.close(); + } + + + @Test + public void testEmptyBlock() throws IOException { + // test the file checksum of a file with an empty block. + RpcClient mockRpcClient = Mockito.mock(RpcClient.class); + + OzoneManagerProtocol om = Mockito.mock(OzoneManagerProtocol.class); + when(mockRpcClient.getOzoneManagerClient()).thenReturn(om); + + OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + .setVolumeName(null) + .setBucketName(null) + .setKeyName(null) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(0, new ArrayList<>()))) + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .setDataSize(0) + .setReplicationConfig(new RatisReplicationConfig( + HddsProtos.ReplicationFactor.ONE)) + .setFileEncryptionInfo(null) + .setAcls(null) + .build(); + + when(om.lookupKey(ArgumentMatchers.any())).thenReturn(omKeyInfo); + + OzoneVolume mockVolume = Mockito.mock(OzoneVolume.class); + when(mockVolume.getName()).thenReturn("vol1"); + OzoneBucket bucket = Mockito.mock(OzoneBucket.class); + when(bucket.getName()).thenReturn("bucket1"); + + ReplicatedFileChecksumHelper helper = new ReplicatedFileChecksumHelper( + mockVolume, bucket, "dummy", 10, mockRpcClient); + helper.compute(); + FileChecksum fileChecksum = helper.getFileChecksum(); + assertTrue(fileChecksum instanceof MD5MD5CRC32GzipFileChecksum); + assertEquals(DataChecksum.Type.CRC32, + ((MD5MD5CRC32GzipFileChecksum)fileChecksum).getCrcType()); + + // test negative length + helper = new ReplicatedFileChecksumHelper( + mockVolume, bucket, "dummy", -1, mockRpcClient); + helper.compute(); + assertNull(helper.getKeyLocationInfoList()); + } + + @Test + public void testOneBlock() throws IOException { + // test the file checksum of a file with one block. + OzoneConfiguration conf = new OzoneConfiguration(); + + RpcClient mockRpcClient = Mockito.mock(RpcClient.class); + + List dns = Arrays.asList( + DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build()); + Pipeline pipeline; + pipeline = Pipeline.newBuilder() + .setId(PipelineID.randomId()) + .setReplicationConfig( + new RatisReplicationConfig(HddsProtos.ReplicationFactor.THREE)) + .setState(Pipeline.PipelineState.CLOSED) + .setNodes(dns) + .build(); + + XceiverClientGrpc xceiverClientGrpc = + new XceiverClientGrpc(pipeline, conf) { + @Override + public XceiverClientReply sendCommandAsync( + ContainerProtos.ContainerCommandRequestProto request, + DatanodeDetails dn) { + return buildValidResponse(); + } + }; + XceiverClientFactory factory = Mockito.mock(XceiverClientFactory.class); + when(factory.acquireClientForReadData(ArgumentMatchers.any())). 
+ thenReturn(xceiverClientGrpc); + + when(mockRpcClient.getXceiverClientManager()).thenReturn(factory); + + OzoneManagerProtocol om = Mockito.mock(OzoneManagerProtocol.class); + when(mockRpcClient.getOzoneManagerClient()).thenReturn(om); + + BlockID blockID = new BlockID(1, 1); + OmKeyLocationInfo omKeyLocationInfo = + new OmKeyLocationInfo.Builder().setPipeline(pipeline) + .setBlockID(blockID) + .build(); + + List omKeyLocationInfoList = + Arrays.asList(omKeyLocationInfo); + + OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + .setVolumeName(null) + .setBucketName(null) + .setKeyName(null) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(0, omKeyLocationInfoList))) + .setCreationTime(Time.now()) + .setModificationTime(Time.now()) + .setDataSize(0) + .setReplicationConfig(new RatisReplicationConfig( + HddsProtos.ReplicationFactor.ONE)) + .setFileEncryptionInfo(null) + .setAcls(null) + .build(); + + when(om.lookupKey(ArgumentMatchers.any())).thenReturn(omKeyInfo); + + OzoneVolume mockVolume = Mockito.mock(OzoneVolume.class); + when(mockVolume.getName()).thenReturn("vol1"); + OzoneBucket bucket = Mockito.mock(OzoneBucket.class); + when(bucket.getName()).thenReturn("bucket1"); + + ReplicatedFileChecksumHelper helper = new ReplicatedFileChecksumHelper( + mockVolume, bucket, "dummy", 10, mockRpcClient); + + helper.compute(); + FileChecksum fileChecksum = helper.getFileChecksum(); + assertTrue(fileChecksum instanceof MD5MD5CRC32GzipFileChecksum); + assertEquals(1, helper.getKeyLocationInfos().size()); + } + + private XceiverClientReply buildValidResponse() { + // return a GetBlockResponse message of a block and its chunk checksums. + ContainerProtos.DatanodeBlockID blockID = + ContainerProtos.DatanodeBlockID.newBuilder() + .setContainerID(1) + .setLocalID(1) + .setBlockCommitSequenceId(1).build(); + + byte[] byteArray = new byte[10]; + ByteString byteString = ByteString.copyFrom(byteArray); + + ContainerProtos.ChecksumData checksumData = + ContainerProtos.ChecksumData.newBuilder() + .setType(CRC32) + .setBytesPerChecksum(1024) + .addChecksums(byteString) + .build(); + + ContainerProtos.ChunkInfo chunkInfo = + ContainerProtos.ChunkInfo.newBuilder() + .setChunkName("dummy_chunk") + .setOffset(1) + .setLen(10) + .setChecksumData(checksumData) + .build(); + + ContainerProtos.BlockData blockData = + ContainerProtos.BlockData.newBuilder() + .setBlockID(blockID) + .addChunks(chunkInfo) + .build(); + ContainerProtos.GetBlockResponseProto getBlockResponseProto + = ContainerProtos.GetBlockResponseProto.newBuilder() + .setBlockData(blockData) + .build(); + + ContainerProtos.ContainerCommandResponseProto resp = + ContainerProtos.ContainerCommandResponseProto.newBuilder() + .setCmdType(ContainerProtos.Type.GetBlock) + .setResult(ContainerProtos.Result.SUCCESS) + .setGetBlock(getBlockResponseProto) + .build(); + final CompletableFuture + replyFuture = new CompletableFuture<>(); + replyFuture.complete(resp); + return new XceiverClientReply(replyFuture); + } + + private OzoneBucket getOzoneBucket() throws IOException { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + store.createVolume(volumeName); + volume = store.getVolume(volumeName); + volume.createBucket(bucketName); + return volume.getBucket(bucketName); + } + + /** + * Write a real key and compute file checksum of it. 
+ * @throws IOException + */ + @Test + public void testPutKeyChecksum() throws IOException { + String value = new String(new byte[1024], UTF_8); + OzoneBucket bucket = getOzoneBucket(); + + for (int i = 0; i < 1; i++) { + String keyName = UUID.randomUUID().toString(); + + try (OzoneOutputStream out = bucket + .createKey(keyName, value.getBytes(UTF_8).length, + ReplicationType.RATIS, ONE, new HashMap<>())) { + out.write(value.getBytes(UTF_8)); + out.write(value.getBytes(UTF_8)); + } + + ReplicatedFileChecksumHelper helper = new ReplicatedFileChecksumHelper( + volume, bucket, keyName, 10, rpcClient); + + helper.compute(); + FileChecksum fileChecksum = helper.getFileChecksum(); + assertTrue(fileChecksum instanceof MD5MD5CRC32GzipFileChecksum); + assertEquals(1, helper.getKeyLocationInfos().size()); + } + } +} \ No newline at end of file diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/package-info.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/package-info.java new file mode 100644 index 00000000000..2ed26bf6aae --- /dev/null +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/checksum/package-info.java @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.client.checksum; + +/** + * This package contains test classes for Ozone Client checksum APIs. + */ \ No newline at end of file diff --git a/hadoop-ozone/client/src/test/resources/log4j.properties b/hadoop-ozone/client/src/test/resources/log4j.properties deleted file mode 100644 index 398786689af..00000000000 --- a/hadoop-ozone/client/src/test/resources/log4j.properties +++ /dev/null @@ -1,23 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# log4j configuration used during build and unit tests - -log4j.rootLogger=INFO,stdout -log4j.threshold=ALL -log4j.appender.stdout=org.apache.log4j.ConsoleAppender -log4j.appender.stdout.layout=org.apache.log4j.PatternLayout -log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} [%t] %-5p %c{2} (%F:%M(%L)) - %m%n diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 96c56580cd2..7571f4ec410 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -484,7 +484,7 @@ public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo, // If this key is in a GDPR enforced bucket, then before moving // KeyInfo to deletedTable, remove the GDPR related metadata and // FileEncryptionInfo from KeyInfo. - if(Boolean.valueOf(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) { + if (Boolean.valueOf(keyInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) { keyInfo.getMetadata().remove(OzoneConsts.GDPR_FLAG); keyInfo.getMetadata().remove(OzoneConsts.GDPR_ALGORITHM); keyInfo.getMetadata().remove(OzoneConsts.GDPR_SECRET); @@ -494,7 +494,7 @@ public static RepeatedOmKeyInfo prepareKeyForDelete(OmKeyInfo keyInfo, // Set the updateID keyInfo.setUpdateID(trxnLogIndex, isRatisEnabled); - if(repeatedOmKeyInfo == null) { + if (repeatedOmKeyInfo == null) { //The key doesn't exist in deletedTable, so create a new instance. repeatedOmKeyInfo = new RepeatedOmKeyInfo(keyInfo); } else { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java index 6a74342b8d2..7ca0634949c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OzoneAcl.java @@ -105,7 +105,7 @@ public OzoneAcl(ACLIdentityType type, String name, BitSet acls, Objects.requireNonNull(type); Objects.requireNonNull(acls); - if(acls.cardinality() > ACLType.getNoOfAcls()) { + if (acls.cardinality() > ACLType.getNoOfAcls()) { throw new IllegalArgumentException("Acl bitset passed has unexpected " + "size. bitset size:" + acls.cardinality() + ", bitset:" + acls.toString()); @@ -159,7 +159,7 @@ public static OzoneAcl parseAcl(String acl) AclScope aclScope = AclScope.ACCESS; // Check if acl string contains scope info. 
- if(parts[2].matches(ACL_SCOPE_REGEX)) { + if (parts[2].matches(ACL_SCOPE_REGEX)) { int indexOfOpenBracket = parts[2].indexOf("["); bits = parts[2].substring(0, indexOfOpenBracket); aclScope = AclScope.valueOf(parts[2].substring(indexOfOpenBracket + 1, @@ -194,7 +194,7 @@ public static List parseAcls(String acls) } List ozAcls = new ArrayList<>(); - for(String acl:parts) { + for (String acl:parts) { ozAcls.add(parseAcl(acl)); } return ozAcls; @@ -289,7 +289,7 @@ public BitSet getAclBitSet() { } public List getAclList() { - if(aclBitSet != null) { + if (aclBitSet != null) { return aclBitSet.stream().mapToObj(a -> ACLType.values()[a]).collect(Collectors.toList()); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index c95f54b49d3..cdd9e526674 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -201,15 +201,15 @@ private OMConfigKeys() { public static final String DELEGATION_REMOVER_SCAN_INTERVAL_KEY = "ozone.manager.delegation.remover.scan.interval"; public static final long DELEGATION_REMOVER_SCAN_INTERVAL_DEFAULT = - 60*60*1000; + 60 * 60 * 1000; public static final String DELEGATION_TOKEN_RENEW_INTERVAL_KEY = "ozone.manager.delegation.token.renew-interval"; public static final long DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT = - 24*60*60*1000; // 1 day = 86400000 ms + 24 * 60 * 60 * 1000; // 1 day = 86400000 ms public static final String DELEGATION_TOKEN_MAX_LIFETIME_KEY = "ozone.manager.delegation.token.max-lifetime"; public static final long DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT = - 7*24*60*60*1000; // 7 days + 7 * 24 * 60 * 60 * 1000; // 7 days public static final String OZONE_DB_CHECKPOINT_TRANSFER_RATE_KEY = "ozone.manager.db.checkpoint.transfer.bandwidthPerSec"; @@ -301,5 +301,10 @@ private OMConfigKeys() { "ozone.om.admin.protocol.wait.between.retries"; public static final long OZONE_OM_ADMIN_PROTOCOL_WAIT_BETWEEN_RETRIES_DEFAULT = 1000; + public static final String OZONE_OM_TRANSPORT_CLASS = + "ozone.om.transport.class"; + public static final String OZONE_OM_TRANSPORT_CLASS_DEFAULT = + "org.apache.hadoop.ozone.om.protocolPB" + + ".Hadoop3OmTransportFactory"; } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMNodeDetails.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMNodeDetails.java index 3de2f12134c..1a06601588f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMNodeDetails.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OMNodeDetails.java @@ -31,7 +31,7 @@ import java.net.InetSocketAddress; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_PORT_KEY; @@ -163,13 +163,13 @@ public String getOMDBCheckpointEnpointUrl(boolean isHttpPolicy) { if (isHttpPolicy) { if (StringUtils.isNotEmpty(getHttpAddress())) { return "http://" + getHttpAddress() + - 
OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT + + OZONE_DB_CHECKPOINT_HTTP_ENDPOINT + "?" + OZONE_DB_CHECKPOINT_REQUEST_FLUSH + "=true"; } } else { if (StringUtils.isNotEmpty(getHttpsAddress())) { return "https://" + getHttpsAddress() + - OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT + + OZONE_DB_CHECKPOINT_HTTP_ENDPOINT + "?" + OZONE_DB_CHECKPOINT_REQUEST_FLUSH + "=true"; } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java index 1c8c18a4b39..9291d33d9af 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketArgs.java @@ -51,6 +51,10 @@ public final class OmBucketArgs extends WithMetadata implements Auditable { private long quotaInBytes; private long quotaInNamespace; + /** + * Bucket Owner Name. + */ + private String ownerName; /** * Private constructor, constructed via builder. @@ -61,9 +65,11 @@ public final class OmBucketArgs extends WithMetadata implements Auditable { * @param quotaInBytes Volume quota in bytes. * @param quotaInNamespace Volume quota in counts. */ + @SuppressWarnings("checkstyle:ParameterNumber") private OmBucketArgs(String volumeName, String bucketName, Boolean isVersionEnabled, StorageType storageType, - Map metadata, long quotaInBytes, long quotaInNamespace) { + Map metadata, long quotaInBytes, long quotaInNamespace, + String ownerName) { this.volumeName = volumeName; this.bucketName = bucketName; this.isVersionEnabled = isVersionEnabled; @@ -71,6 +77,7 @@ private OmBucketArgs(String volumeName, String bucketName, this.metadata = metadata; this.quotaInBytes = quotaInBytes; this.quotaInNamespace = quotaInNamespace; + this.ownerName = ownerName; } /** @@ -121,6 +128,14 @@ public long getQuotaInNamespace() { return quotaInNamespace; } + /** + * Returns Bucket Owner Name. + * @return ownerName. + */ + public String getOwnerName() { + return ownerName; + } + /** * Returns new builder class that builds a OmBucketArgs. * @return Builder @@ -138,9 +153,12 @@ public Map toAuditMap() { this.metadata.get(OzoneConsts.GDPR_FLAG)); auditMap.put(OzoneConsts.IS_VERSION_ENABLED, String.valueOf(this.isVersionEnabled)); - if(this.storageType != null){ + if (this.storageType != null) { auditMap.put(OzoneConsts.STORAGE_TYPE, this.storageType.name()); } + if (this.ownerName != null) { + auditMap.put(OzoneConsts.OWNER, this.ownerName); + } return auditMap; } @@ -155,7 +173,7 @@ public static class Builder { private Map metadata; private long quotaInBytes; private long quotaInNamespace; - + private String ownerName; /** * Constructs a builder. */ @@ -199,6 +217,11 @@ public Builder setQuotaInNamespace(long quota) { return this; } + public Builder setOwnerName(String owner) { + ownerName = owner; + return this; + } + /** * Constructs the OmBucketArgs. * @return instance of OmBucketArgs. 
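A rough usage sketch for the ownerName support added in the OmBucketArgs hunks above; setOwnerName, getOwnerName, getProtobuf and getFromProtobuf are taken from this patch, while newBuilder and the volume/bucket builder setters are assumed from the existing class:

// Sketch only (not part of the patch): round-trips the new bucket owner field through protobuf.
static String ownerAfterRoundTrip() {
  OmBucketArgs args = OmBucketArgs.newBuilder()   // newBuilder() assumed from the existing class
      .setVolumeName("vol1")                      // assumed existing builder setter
      .setBucketName("bucket1")                   // assumed existing builder setter
      .setOwnerName("hadoop")                     // setter added by this patch
      .build();
  // getProtobuf() copies ownerName only when it is non-null; getFromProtobuf() reads it back.
  OmBucketArgs copy = OmBucketArgs.getFromProtobuf(args.getProtobuf());
  return copy.getOwnerName();                     // expected: "hadoop"
}
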
@@ -207,7 +230,7 @@ public OmBucketArgs build() { Preconditions.checkNotNull(volumeName); Preconditions.checkNotNull(bucketName); return new OmBucketArgs(volumeName, bucketName, isVersionEnabled, - storageType, metadata, quotaInBytes, quotaInNamespace); + storageType, metadata, quotaInBytes, quotaInNamespace, ownerName); } } @@ -218,18 +241,21 @@ public BucketArgs getProtobuf() { BucketArgs.Builder builder = BucketArgs.newBuilder(); builder.setVolumeName(volumeName) .setBucketName(bucketName); - if(isVersionEnabled != null) { + if (isVersionEnabled != null) { builder.setIsVersionEnabled(isVersionEnabled); } - if(storageType != null) { + if (storageType != null) { builder.setStorageType(storageType.toProto()); } - if(quotaInBytes > 0 || quotaInBytes == OzoneConsts.QUOTA_RESET) { + if (quotaInBytes > 0 || quotaInBytes == OzoneConsts.QUOTA_RESET) { builder.setQuotaInBytes(quotaInBytes); } - if(quotaInNamespace > 0 || quotaInNamespace == OzoneConsts.QUOTA_RESET) { + if (quotaInNamespace > 0 || quotaInNamespace == OzoneConsts.QUOTA_RESET) { builder.setQuotaInNamespace(quotaInNamespace); } + if (ownerName != null) { + builder.setOwnerName(ownerName); + } return builder.build(); } @@ -247,6 +273,8 @@ public static OmBucketArgs getFromProtobuf(BucketArgs bucketArgs) { bucketArgs.getStorageType()) : null, KeyValueUtil.getFromProtobuf(bucketArgs.getMetadataList()), bucketArgs.getQuotaInBytes(), - bucketArgs.getQuotaInNamespace()); + bucketArgs.getQuotaInNamespace(), + bucketArgs.hasOwnerName() ? + bucketArgs.getOwnerName() : null); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index 786bb74a75b..ad81c8b016f 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -91,7 +91,7 @@ public final class OmBucketInfo extends WithObjectID implements Auditable { */ private BucketLayout bucketLayout; - private final String owner; + private String owner; /** * Private constructor, constructed via builder. @@ -297,6 +297,14 @@ public String getOwner() { return owner; } + public void setModificationTime(long modificationTime) { + this.modificationTime = modificationTime; + } + + public void setOwner(String ownerName) { + this.owner = ownerName; + } + /** * Returns new builder class that builds a OmBucketInfo. * @@ -311,6 +319,7 @@ public Map toAuditMap() { Map auditMap = new LinkedHashMap<>(); auditMap.put(OzoneConsts.VOLUME, this.volumeName); auditMap.put(OzoneConsts.BUCKET, this.bucketName); + auditMap.put(OzoneConsts.BUCKET_LAYOUT, String.valueOf(this.bucketLayout)); auditMap.put(OzoneConsts.GDPR_FLAG, this.metadata.get(OzoneConsts.GDPR_FLAG)); auditMap.put(OzoneConsts.ACLS, diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java index d6f22430f9a..485cf32a7a6 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java @@ -150,7 +150,7 @@ public long getParentObjectID() { public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() { - return keyLocationVersions.size() == 0? null : + return keyLocationVersions.size() == 0 ? 
null : keyLocationVersions.get(keyLocationVersions.size() - 1); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java index 1504f4e35e9..9df7518db4a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java @@ -225,7 +225,7 @@ public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) { getPipeline(keyLocation), keyLocation.getLength(), keyLocation.getOffset(), keyLocation.getPartNumber()); - if(keyLocation.hasToken()) { + if (keyLocation.hasToken()) { info.token = (Token) OzonePBHelper.tokenFromProto(keyLocation.getToken()); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java index e312138b5f9..ec660684f54 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java @@ -165,7 +165,7 @@ void appendNewBlocks(List newLocationList) { } } - void removeBlocks(long versionToRemove){ + void removeBlocks(long versionToRemove) { locationVersionMap.remove(versionToRemove); } @@ -181,7 +181,7 @@ public String toString() { sb.append("version:").append(version).append(" "); sb.append("isMultipartKey:").append(isMultipartKey); for (List kliList : locationVersionMap.values()) { - for(OmKeyLocationInfo kli: kliList) { + for (OmKeyLocationInfo kli: kliList) { sb.append(kli.getLocalID()).append(" || "); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index 85165d6e5b5..91cc19c3143 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -94,7 +94,7 @@ public void setQuotaInBytes(long quotaInBytes) { } public void setQuotaInNamespace(long quotaInNamespace) { - this.quotaInNamespace= quotaInNamespace; + this.quotaInNamespace = quotaInNamespace; } public void setCreationTime(long time) { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java index 0ca1e36e225..94dff5115e9 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclUtil.java @@ -43,7 +43,7 @@ */ public final class OzoneAclUtil { - private OzoneAclUtil(){ + private OzoneAclUtil() { } /** @@ -60,7 +60,7 @@ public static List getAclList(String userName, // User ACL. listOfAcls.add(new OzoneAcl(USER, userName, userRights, ACCESS)); - if(userGroups != null) { + if (userGroups != null) { // Group ACLs of the User. 
Arrays.asList(userGroups).forEach((group) -> listOfAcls.add( new OzoneAcl(GROUP, group, groupRights, ACCESS))); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java index 3f09d4ac3be..1de593441e2 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java @@ -31,7 +31,7 @@ */ public final class OzoneFSUtils { - private OzoneFSUtils() {} + private OzoneFSUtils() { } /** * Returns string representation of path after removing the leading slash. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java index cde8e3901aa..83a7184123a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java @@ -54,7 +54,7 @@ public List getOmKeyInfoList() { public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo repeatedKeyInfo) { List list = new ArrayList<>(); - for(KeyInfo k : repeatedKeyInfo.getKeyInfoList()) { + for (KeyInfo k : repeatedKeyInfo.getKeyInfoList()) { list.add(OmKeyInfo.getFromProtobuf(k)); } return new RepeatedOmKeyInfo.Builder().setOmKeyInfos(list).build(); @@ -67,7 +67,7 @@ public static RepeatedOmKeyInfo getFromProto(RepeatedKeyInfo */ public RepeatedKeyInfo getProto(boolean compact, int clientVersion) { List list = new ArrayList<>(); - for(OmKeyInfo k : omKeyInfoList) { + for (OmKeyInfo k : omKeyInfoList) { list.add(k.getProtobuf(compact, clientVersion)); } @@ -82,7 +82,7 @@ public RepeatedKeyInfo getProto(boolean compact, int clientVersion) { public static class Builder { private List omKeyInfos; - public Builder(){} + public Builder() { } public Builder setOmKeyInfos(List infoList) { this.omKeyInfos = infoList; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java index c6eb5ddc0ae..0a8b1d6f67c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/ServiceInfo.java @@ -62,7 +62,7 @@ public final class ServiceInfo { /** * Default constructor for JSON deserialization. */ - public ServiceInfo() {} + public ServiceInfo() { } /** * Constructs the ServiceInfo for the {@code nodeType}. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java index 6b12f13e634..eebb4d87517 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/WithObjectID.java @@ -61,7 +61,7 @@ public long getUpdateID() { * @param obId - long */ public void setObjectID(long obId) { - if(this.objectID != 0) { + if (this.objectID != 0) { throw new UnsupportedOperationException("Attempt to modify object ID " + "which is not zero. 
Current Object ID is " + this.objectID); } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java index 7bc67da7fa4..7da3bb8406c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocol/OzoneManagerProtocol.java @@ -199,6 +199,18 @@ default void setBucketProperty(OmBucketArgs args) throws IOException { "this to be implemented, as write requests use a new approach."); } + /** + * Changes the owner of a bucket. + * @param args - OMBucketArgs + * @return true if operation succeeded, false if specified user is + * already the owner. + * @throws IOException + */ + default boolean setBucketOwner(OmBucketArgs args) throws IOException { + throw new UnsupportedOperationException("OzoneManager does not require " + + "this to be implemented, as write requests use a new approach."); + } + /** * Open the given key and return an open key session. diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java index 1ffc861f805..2eb11d03202 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OmTransportFactory.java @@ -24,6 +24,9 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.security.UserGroupInformation; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS_DEFAULT; + /** * Factory pattern to create object for RPC communication with OM. 
*/ @@ -34,20 +37,29 @@ OmTransport createOmTransport(ConfigurationSource source, static OmTransport create(ConfigurationSource conf, UserGroupInformation ugi, String omServiceId) throws IOException { - OmTransportFactory factory = createFactory(); + OmTransportFactory factory = createFactory(conf); return factory.createOmTransport(conf, ugi, omServiceId); } - static OmTransportFactory createFactory() throws IOException { - ServiceLoader transportFactoryServiceLoader = - ServiceLoader.load(OmTransportFactory.class); - Iterator iterator = - transportFactoryServiceLoader.iterator(); - if (iterator.hasNext()) { - return iterator.next(); - } + static OmTransportFactory createFactory(ConfigurationSource conf) + throws IOException { try { + // if configured transport class is different than the default + // Hadoop3OmTransportFactory, then check service loader for + // transport class and instantiate it + if (conf + .get(OZONE_OM_TRANSPORT_CLASS, + OZONE_OM_TRANSPORT_CLASS_DEFAULT) != + OZONE_OM_TRANSPORT_CLASS_DEFAULT) { + ServiceLoader transportFactoryServiceLoader = + ServiceLoader.load(OmTransportFactory.class); + Iterator iterator = + transportFactoryServiceLoader.iterator(); + if (iterator.hasNext()) { + return iterator.next(); + } + } return OmTransportFactory.class.getClassLoader() .loadClass( "org.apache.hadoop.ozone.om.protocolPB" diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index b2c367f9c76..be758a06d22 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -526,6 +526,28 @@ public void setBucketProperty(OmBucketArgs args) } + /** + * {@inheritDoc} + */ + @Override + public boolean setBucketOwner(OmBucketArgs args) + throws IOException { + SetBucketPropertyRequest.Builder req = + SetBucketPropertyRequest.newBuilder(); + BucketArgs bucketArgs = args.getProtobuf(); + req.setBucketArgs(bucketArgs); + + OMRequest omRequest = createOMRequest(Type.SetBucketProperty) + .setSetBucketPropertyRequest(req) + .build(); + + OMResponse omResponse = submitRequest(omRequest); + SetBucketPropertyResponse response = + handleError(omResponse).getSetBucketPropertyResponse(); + + return response.getResponse(); + } + /** * List buckets in a volume. 
* @@ -581,7 +603,7 @@ public OpenKeySession openKey(OmKeyArgs args) throws IOException { .setBucketName(args.getBucketName()) .setKeyName(args.getKeyName()); - if(args.getAcls() != null) { + if (args.getAcls() != null) { keyArgs.addAllAcls(args.getAcls().stream().distinct().map(a -> OzoneAcl.toProtobuf(a)).collect(Collectors.toList())); } @@ -1209,7 +1231,7 @@ public Token getDelegationToken(Text renewer) OMPBHelper.convertToDelegationToken(resp.getResponse().getToken()) : null; } catch (IOException e) { - if(e instanceof OMException) { + if (e instanceof OMException) { throw (OMException)e; } throw new OMException("Get delegation token failed.", e, @@ -1241,7 +1263,7 @@ public long renewDelegationToken(Token token) .getRenewDelegationTokenResponse(); return resp.getResponse().getNewExpiryTime(); } catch (IOException e) { - if(e instanceof OMException) { + if (e instanceof OMException) { throw (OMException)e; } throw new OMException("Renew delegation token failed.", e, @@ -1270,7 +1292,7 @@ public void cancelDelegationToken(Token token) try { handleError(submitRequest(omRequest)); } catch (IOException e) { - if(e instanceof OMException) { + if (e instanceof OMException) { throw (OMException)e; } throw new OMException("Cancel delegation token failed.", e, diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java index 2ff2dc830a2..51c7d54d6c6 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OMPBHelper.java @@ -50,7 +50,7 @@ private OMPBHelper() { * @return tokenProto */ public static TokenProto convertToTokenProto(Token tok) { - if(tok == null){ + if (tok == null) { throw new IllegalArgumentException("Invalid argument: token is null"); } @@ -86,9 +86,9 @@ public static BucketEncryptionKeyInfo convert( } return new BucketEncryptionKeyInfo( - beInfo.hasCryptoProtocolVersion()? + beInfo.hasCryptoProtocolVersion() ? convert(beInfo.getCryptoProtocolVersion()) : null, - beInfo.hasSuite()? convert(beInfo.getSuite()) : null, + beInfo.hasSuite() ? 
convert(beInfo.getSuite()) : null, beInfo.getKeyName()); } @@ -106,7 +106,7 @@ public static BucketEncryptionInfoProto convert( if (beInfo.getSuite() != null) { bb.setSuite(convert(beInfo.getSuite())); } - if (beInfo.getVersion()!= null) { + if (beInfo.getVersion() != null) { bb.setCryptoProtocolVersion(convert(beInfo.getVersion())); } return bb.build(); @@ -142,7 +142,7 @@ public static FileEncryptionInfo convert(FileEncryptionInfoProto proto) { } public static CipherSuite convert(CipherSuiteProto proto) { - switch(proto) { + switch (proto) { case AES_CTR_NOPADDING: return CipherSuite.AES_CTR_NOPADDING; default: @@ -166,7 +166,7 @@ public static CipherSuiteProto convert(CipherSuite suite) { public static CryptoProtocolVersionProto convert( CryptoProtocolVersion version) { - switch(version) { + switch (version) { case UNKNOWN: return OzoneManagerProtocolProtos.CryptoProtocolVersionProto .UNKNOWN_PROTOCOL_VERSION; @@ -180,7 +180,7 @@ public static CryptoProtocolVersionProto convert( public static CryptoProtocolVersion convert( CryptoProtocolVersionProto proto) { - switch(proto) { + switch (proto) { case ENCRYPTION_ZONES: return CryptoProtocolVersion.ENCRYPTION_ZONES; default: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java index 76fb76a8e41..09c8743137d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/security/acl/OzoneObjInfo.java @@ -111,7 +111,7 @@ public static OzoneObjInfo fromProtobuf(OzoneManagerProtocolProtos.OzoneObj .setStoreType(StoreType.valueOf(proto.getStoreType().name())); String[] tokens = StringUtils.split(proto.getPath(), OZONE_URI_DELIMITER, 3); - if(tokens == null) { + if (tokens == null) { throw new IllegalArgumentException("Unexpected path:" + proto.getPath()); } // Set volume name. 
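A brief sketch of how the new ozone.om.transport.class selection in OmTransportFactory above could be exercised; OmTransportFactory.create and the OMConfigKeys constants come from this patch, while the custom factory class name is hypothetical and would also need a ServiceLoader (META-INF/services) registration to be discovered:

// Sketch only (not part of the patch): requesting a non-default OM transport implementation.
OzoneConfiguration conf = new OzoneConfiguration();
// Point the new key at a custom factory; the class name here is hypothetical.
conf.set(OMConfigKeys.OZONE_OM_TRANSPORT_CLASS, "org.example.GrpcOmTransportFactory");
// create() consults ServiceLoader for the configured factory and otherwise falls back
// to the default Hadoop3OmTransportFactory, per the hunk above.
OmTransport transport = OmTransportFactory.create(
    conf, UserGroupInformation.getCurrentUser(), "omServiceId");
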
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java index 0b1b787342c..85e452ed0fb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/OzoneVersionInfo.java @@ -44,7 +44,7 @@ public final class OzoneVersionInfo { public static final RatisVersionInfo RATIS_VERSION_INFO = new RatisVersionInfo(); - private OzoneVersionInfo() {} + private OzoneVersionInfo() { } public static void main(String[] args) { System.out.println( diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java index d7794dbbf7c..0f859732aac 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/util/RadixTree.java @@ -129,7 +129,7 @@ private boolean removePrefixPathInternal(RadixNode current, return false; } - if (removePrefixPathInternal(node, path, level+1)) { + if (removePrefixPathInternal(node, path, level + 1)) { current.getChildren().remove(name); return current.hasChildren(); } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java index 12b0d408654..052ff8ff62e 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/ha/TestOMFailoverProxyProvider.java @@ -180,7 +180,7 @@ public void testCanonicalTokenServiceName() throws IOException { String nodeId = NODE_ID_BASE_STR + i; ozoneConf.set( ConfUtils.addKeySuffixes(OZONE_OM_ADDRESS_KEY, OM_SERVICE_ID, - nodeId), nodeAddrs.get(i-1)); + nodeId), nodeAddrs.get(i - 1)); allNodeIds.add(nodeId); } ozoneConf.set(ConfUtils.addKeySuffixes(OZONE_OM_NODES_KEY, OM_SERVICE_ID), diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index 16285c20170..de12e795a3b 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -70,7 +70,7 @@ private void testResourceReacquireLock(String[] resourceName, // Lock re-acquire not allowed by same thread. 
if (resource == OzoneManagerLock.Resource.USER_LOCK || resource == OzoneManagerLock.Resource.S3_SECRET_LOCK || - resource == OzoneManagerLock.Resource.PREFIX_LOCK){ + resource == OzoneManagerLock.Resource.PREFIX_LOCK) { lock.acquireWriteLock(resource, resourceName); try { lock.acquireWriteLock(resource, resourceName); diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java index 39c622043ba..1ddf353c127 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java @@ -55,7 +55,7 @@ public void testKeyGenerationWithValidInput() throws Exception { @Test public void testKeyGenerationWithInvalidInput() throws Exception { GDPRSymmetricKey gkey = null; - try{ + try { gkey = new GDPRSymmetricKey(RandomStringUtils.randomAlphabetic(5), OzoneConsts.GDPR_ALGORITHM_NAME); } catch (IllegalArgumentException ex) { diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java index ab24b1b5925..2db0dbbd8a9 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneObjInfo.java @@ -67,10 +67,10 @@ public void testGetBucketName() { objInfo = getBuilder(volume, bucket, key).build(); assertEquals(objInfo.getBucketName(), bucket); - objInfo =getBuilder(volume, null, null).build(); + objInfo = getBuilder(volume, null, null).build(); assertEquals(objInfo.getBucketName(), null); - objInfo =getBuilder(null, bucket, null).build(); + objInfo = getBuilder(null, bucket, null).build(); assertEquals(objInfo.getBucketName(), bucket); } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java index b97b8445dbc..817885ea930 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/util/TestRadixTree.java @@ -80,13 +80,13 @@ public void testGetLongestPrefix() { public void testGetLongestPrefixPath() { List> lpp = ROOT.getLongestPrefixPath("/a/b/c/d/g/p"); - RadixNode lpn = lpp.get(lpp.size()-1); + RadixNode lpn = lpp.get(lpp.size() - 1); assertEquals("g", lpn.getName()); lpn.setValue(100); List> lpq = ROOT.getLongestPrefixPath("/a/b/c/d/g/q"); - RadixNode lqn = lpp.get(lpq.size()-1); + RadixNode lqn = lpp.get(lpq.size() - 1); System.out.print(RadixTree.radixPathToString(lpq)); assertEquals(lpn, lqn); assertEquals("g", lqn.getName()); diff --git a/hadoop-ozone/dev-support/checks/bats.sh b/hadoop-ozone/dev-support/checks/bats.sh index f5b2cc90afd..dc071324a51 100755 --- a/hadoop-ozone/dev-support/checks/bats.sh +++ b/hadoop-ozone/dev-support/checks/bats.sh @@ -21,13 +21,19 @@ source "${DIR}/_lib.sh" install_bats +git clone https://github.com/bats-core/bats-assert dev-support/ci/bats-assert +git clone https://github.com/bats-core/bats-support dev-support/ci/bats-support + REPORT_DIR=${OUTPUT_DIR:-"${DIR}/../../../target/bats"} mkdir -p "${REPORT_DIR}" REPORT_FILE="${REPORT_DIR}/summary.txt" rm -f "${REPORT_DIR}/output.log" -find * -path '*/src/test/shell/*' 
-name '*.bats' -print0 \ +find * \( \ + -path '*/src/test/shell/*' -name '*.bats' \ + -or -path dev-support/ci/selective_ci_checks.bats \ + \) -print0 \ | xargs -0 -n1 bats --formatter tap \ | tee -a "${REPORT_DIR}/output.log" diff --git a/hadoop-ozone/dev-support/checks/checkstyle.sh b/hadoop-ozone/dev-support/checks/checkstyle.sh index e76a67a01e1..bd5e7f0f199 100755 --- a/hadoop-ozone/dev-support/checks/checkstyle.sh +++ b/hadoop-ozone/dev-support/checks/checkstyle.sh @@ -44,6 +44,9 @@ find "." -name checkstyle-errors.xml -print0 \ -e 's//g" \ | tee "$REPORT_FILE" ## generate counter diff --git a/hadoop-ozone/dev-support/checks/coverage.sh b/hadoop-ozone/dev-support/checks/coverage.sh index 75d6126483e..dee0db9e125 100755 --- a/hadoop-ozone/dev-support/checks/coverage.sh +++ b/hadoop-ozone/dev-support/checks/coverage.sh @@ -51,7 +51,6 @@ find target/coverage-classes -name proto -type d | xargs rm -rf find target/coverage-classes -name generated -type d | xargs rm -rf find target/coverage-classes -name v1 -type d | xargs rm -rf find target/coverage-classes -name freon -type d | xargs rm -rf -find target/coverage-classes -name genesis -type d | xargs rm -rf #generate the reports jacoco report "$REPORT_DIR/jacoco-all.exec" --classfiles target/coverage-classes --html "$REPORT_DIR/all" --xml "$REPORT_DIR/all.xml" diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 72ce7b77d6c..8a098c7c27b 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -28,7 +28,7 @@ UTF-8 true - 20210329-1 + 20211202-1 apache/ozone-testkrb5:20210419-1 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config index e3fbb6a16ee..31050a2355b 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-csi/docker-config @@ -21,6 +21,7 @@ OZONE-SITE.XML_ozone.csi.socket=/tmp/csi.sock OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 +OZONE-SITE.XML_ozone.scm.http-address=scm:9876 OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config index 5b2632d6ba8..fa38aad00dd 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config @@ -38,6 +38,10 @@ OZONE-SITE.XML_ozone.datanode.pipeline.limit=1 OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1 OZONE-SITE.XML_hdds.container.report.interval=60s +OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon +OZONE-SITE.XML_ozone.recon.address=recon:9891 +OZONE-SITE.XML_ozone.recon.http-address=0.0.0.0:9888 +OZONE-SITE.XML_ozone.recon.https-address=0.0.0.0:9889 OZONE_CONF_DIR=/etc/hadoop OZONE_LOG_DIR=/var/log/hadoop diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-config b/hadoop-ozone/dist/src/main/compose/ozone/docker-config index d13d135674e..1dbeed06387 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-config @@ -19,6 +19,7 @@ CORE-SITE.XML_fs.trash.interval=1 OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 +OZONE-SITE.XML_ozone.scm.http-address=scm:9876 OZONE-SITE.XML_ozone.scm.container.size=1GB 
OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh index 5aacbfcbdb8..59a90ed5d21 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh @@ -53,12 +53,6 @@ execute_robot_test scm admincli execute_robot_test scm -v SCHEME:ofs -v BUCKET_TYPE:link -N ozonefs-fso-ofs-link ozonefs/ozonefs.robot execute_robot_test scm -v SCHEME:o3fs -v BUCKET_TYPE:bucket -N ozonefs-fso-o3fs-bucket ozonefs/ozonefs.robot -execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-fso-layout-objectputget s3/objectputget.robot -execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-fso-layout-objectdelete s3/objectdelete.robot -execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-fso-layout-objectcopy s3/objectcopy.robot -execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-fso-layout-objectmultidelete s3/objectmultidelete.robot -execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket}-fso-layout-MultipartUpload s3/MultipartUpload.robot - stop_docker_env generate_report diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/HTTP.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/HTTP.keytab deleted file mode 100755 index 073055f48c3..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/HTTP.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/dn.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/dn.keytab deleted file mode 100755 index 263047401e4..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/dn.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/om.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/om.keytab deleted file mode 100755 index 1241ec25ce0..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/om.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/recon.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/recon.keytab deleted file mode 100755 index 4a35968edc9..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/recon.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/s3g.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/s3g.keytab deleted file mode 100755 index 8cbe7cd6f9c..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/s3g.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/scm.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/scm.keytab deleted file mode 100755 index 9bbfd84dc6f..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/scm.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/testuser.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/testuser.keytab deleted file mode 100755 index 5242d2e75d2..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/testuser.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/testuser2.keytab 
b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/testuser2.keytab deleted file mode 100755 index 9adeb6e408d..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/keytabs/testuser2.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config index 497e0d5d10e..5cfe4f02be7 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/docker-config @@ -16,6 +16,7 @@ OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 +OZONE-SITE.XML_ozone.scm.http-address=scm:9876 OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/HTTP.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/HTTP.keytab deleted file mode 100755 index 073055f48c3..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/HTTP.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/dn.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/dn.keytab deleted file mode 100755 index 263047401e4..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/dn.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/om.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/om.keytab deleted file mode 100755 index 1241ec25ce0..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/om.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/recon.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/recon.keytab deleted file mode 100755 index 4a35968edc9..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/recon.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/s3g.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/s3g.keytab deleted file mode 100755 index 8cbe7cd6f9c..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/s3g.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/scm.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/scm.keytab deleted file mode 100755 index 9bbfd84dc6f..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/scm.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/testuser.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/testuser.keytab deleted file mode 100755 index 5242d2e75d2..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/testuser.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/testuser2.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/testuser2.keytab deleted file mode 100755 index 9adeb6e408d..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/keytabs/testuser2.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh 
b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh index dca98212aef..85992f1c38b 100755 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-mr/test.sh @@ -27,6 +27,9 @@ export SECURITY_ENABLED=true start_docker_env +execute_command_in_container rm sudo bash -c "sed -i -e 's/^mirrorlist/#&/' -e 's/^#baseurl/baseurl/' -e 's/mirror.centos.org/vault.centos.org/' /etc/yum.repos.d/*.repo" +execute_command_in_container rm sudo yum install -y krb5-workstation + execute_robot_test om kinit.robot execute_robot_test om createmrenv.robot @@ -37,7 +40,6 @@ export OZONE_DIR=/opt/ozone # shellcheck source=/dev/null source "$COMPOSE_DIR/../testlib.sh" -execute_command_in_container rm sudo yum install -y krb5-workstation execute_robot_test rm kinit-hadoop.robot for scheme in o3fs ofs; do diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config index f4fcc637248..bbba3632b38 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config @@ -19,6 +19,7 @@ CORE-SITE.XML_fs.trash.interval=1 OZONE-SITE.XML_ozone.om.address=om OZONE-SITE.XML_ozone.om.http-address=om:9874 +OZONE-SITE.XML_ozone.scm.http-address=scm:9876 OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 @@ -39,6 +40,7 @@ OZONE-SITE.XML_ozone.recon.om.snapshot.task.interval.delay=1m OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon OZONE-SITE.XML_ozone.recon.om.snapshot.task.initial.delay=20s OZONE-SITE.XML_ozone.recon.address=recon:9891 +OZONE-SITE.XML_ozone.scm.ratis.enable=false OZONE-SITE.XML_ozone.security.enabled=true OZONE-SITE.XML_ozone.acl.enabled=true diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/HTTP.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/HTTP.keytab deleted file mode 100755 index 073055f48c3..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/HTTP.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/dn.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/dn.keytab deleted file mode 100755 index 263047401e4..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/dn.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/om.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/om.keytab deleted file mode 100755 index 1241ec25ce0..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/om.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/recon.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/recon.keytab deleted file mode 100755 index 4a35968edc9..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/recon.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/s3g.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/s3g.keytab deleted file mode 100755 index 8cbe7cd6f9c..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/s3g.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/scm.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/scm.keytab 
deleted file mode 100755 index 9bbfd84dc6f..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/scm.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/testuser.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/testuser.keytab deleted file mode 100755 index 5242d2e75d2..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/testuser.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/testuser2.keytab b/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/testuser2.keytab deleted file mode 100755 index 9adeb6e408d..00000000000 Binary files a/hadoop-ozone/dist/src/main/compose/ozonesecure/keytabs/testuser2.keytab and /dev/null differ diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config index b204085aff4..8e01004d8d7 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config @@ -24,7 +24,9 @@ OZONE-SITE.XML_ozone.om.address.omservice.om1=om1 OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.ratis.enable=true - +# setting ozone.scm.ratis.enable to false for now, as scm ha upgrade is +# not supported yet. This is supposed to work without SCM HA configuration +OZONE-SITE.XML_ozone.scm.ratis.enable=false OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 OZONE-SITE.XML_ozone.scm.names=scm diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config index 1a7419c52bc..afecf1d6a29 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config @@ -29,7 +29,7 @@ OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data OZONE-SITE.XML_ozone.scm.block.client.address=scm OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.client.address=scm - +OZONE-SITE.XML_ozone.scm.ratis.enable=false OZONE-SITE.XML_hdds.datanode.dir=/data/hdds OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon diff --git a/hadoop-ozone/dist/src/main/docker/Dockerfile b/hadoop-ozone/dist/src/main/docker/Dockerfile index 13ba07ce69a..4dc81f2390e 100644 --- a/hadoop-ozone/dist/src/main/docker/Dockerfile +++ b/hadoop-ozone/dist/src/main/docker/Dockerfile @@ -16,7 +16,6 @@ FROM apache/ozone-runner:@docker.ozone-runner.version@ -ENV PATH /opt/hadoop/libexec:${PATH} ADD --chown=hadoop . 
/opt/hadoop diff --git a/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh b/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh index 7c0732efc6f..65e709da29e 100755 --- a/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh +++ b/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh @@ -97,7 +97,7 @@ if [ -n "$KERBEROS_ENABLED" ]; then sudo sed -i "s/krb5/$KERBEROS_SERVER/g" "/etc/krb5.conf" || true fi -CONF_DESTINATION_DIR="${HADOOP_CONF_DIR:-/opt/hadoop/etc/hadoop}" +CONF_DESTINATION_DIR="${OZONE_CONF_DIR:-/opt/hadoop/etc/hadoop}" #Try to copy the defaults set +e diff --git a/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh index 56eedd95b8a..0ae44e41515 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh +++ b/hadoop-ozone/dist/src/main/k8s/examples/testlib.sh @@ -116,7 +116,10 @@ regenerate_resources() { OZONE_ROOT=$(realpath ../../..) fi - flekszible generate -t mount:hostPath="$OZONE_ROOT",path=/opt/hadoop -t image:image=apache/ozone-runner:20200420-1 -t ozone/onenode + local default_version=${docker.ozone-runner.version} # set at build-time from Maven property + local runner_version=${OZONE_RUNNER_VERSION:-${default_version}} # may be specified by user running the test + + flekszible generate -t mount:hostPath="$OZONE_ROOT",path=/opt/hadoop -t image:image=apache/ozone-runner:${runner_version} -t ozone/onenode } revert_resources() { diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot index fc133c10450..511679c56f4 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot @@ -16,18 +16,21 @@ *** Settings *** Documentation Read Compatibility Resource ../ozone-lib/shell.robot +Resource setup.robot Test Timeout 5 minutes +Suite Setup Create Local Test File *** Variables *** ${SUFFIX} ${EMPTY} *** Test Cases *** Key Can Be Read - Key Should Match Local File /vol1/bucket1/key-${SUFFIX} /etc/passwd + Key Should Match Local File /vol1/bucket1/key-${SUFFIX} ${TESTFILE} Dir Can Be Listed Execute ozone fs -ls o3fs://bucket1.vol1/dir-${SUFFIX} File Can Be Get - Execute ozone fs -get o3fs://bucket1.vol1/dir-${SUFFIX}/passwd /tmp/passwd-${SUFFIX} - [teardown] Execute rm /tmp/passwd-${SUFFIX} + Execute ozone fs -get o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX} /tmp/ + Execute diff -q ${TESTFILE} /tmp/file-${SUFFIX} + [teardown] Execute rm /tmp/file-${SUFFIX} diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot new file mode 100644 index 00000000000..ae765f23e2b --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/setup.robot @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Setup for Compatibility Tests +Library OperatingSystem +Resource ../ozone-lib/shell.robot + +*** Variables *** +${SUFFIX} ${EMPTY} + + +*** Keywords *** +Create Local Test File + Set Suite Variable ${TESTFILE} /tmp/test-data-${SUFFIX}.txt + Create File ${TESTFILE} Compatibility Test diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot index f5c92012571..4c611d4287b 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot @@ -16,17 +16,20 @@ *** Settings *** Documentation Write Compatibility Resource ../ozone-lib/shell.robot +Resource setup.robot Test Timeout 5 minutes +Suite Setup Create Local Test File *** Variables *** ${SUFFIX} ${EMPTY} + *** Test Cases *** Key Can Be Written - Create Key /vol1/bucket1/key-${SUFFIX} /etc/passwd + Create Key /vol1/bucket1/key-${SUFFIX} ${TESTFILE} Dir Can Be Created Execute ozone fs -mkdir o3fs://bucket1.vol1/dir-${SUFFIX} File Can Be Put - Execute ozone fs -put /etc/passwd o3fs://bucket1.vol1/dir-${SUFFIX}/ + Execute ozone fs -put ${TESTFILE} o3fs://bucket1.vol1/dir-${SUFFIX}/file-${SUFFIX} diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 777de10fd9d..3b5ac09a1aa 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -43,7 +43,6 @@ function ozone_usage ozone_add_subcommand "freon" client "runs an ozone data generator" ozone_add_subcommand "fs" client "run a filesystem command on Ozone file system. Equivalent to 'hadoop fs'" ozone_add_subcommand "genconf" client "generate minimally required ozone configs and output to ozone-site.xml in specified path" - ozone_add_subcommand "genesis" client "runs a collection of ozone benchmarks to help with tuning." ozone_add_subcommand "getconf" client "get ozone config values from configuration" ozone_add_subcommand "jmxget" admin "get JMX exported values from NameNode or DataNode." ozone_add_subcommand "om" daemon "Ozone Manager" @@ -133,22 +132,6 @@ function ozonecmd_case OZONE_FREON_OPTS="${OZONE_FREON_OPTS}" OZONE_RUN_ARTIFACT_NAME="ozone-tools" ;; - genesis) - ARTIFACT_LIB_DIR="${OZONE_HOME}/share/ozone/lib/ozone-tools" - mkdir -p "$ARTIFACT_LIB_DIR" - if [[ ! -f "$ARTIFACT_LIB_DIR/jmh-core-1.23.jar" ]]; then - echo "jmh-core jar is missing from $ARTIFACT_LIB_DIR, trying to download from maven central (License: GPL + classpath exception)" - curl -o "$ARTIFACT_LIB_DIR/jmh-core-1.23.jar" https://repo1.maven.org/maven2/org/openjdk/jmh/jmh-core/1.23/jmh-core-1.23.jar - fi - - if [[ ! 
-f "$ARTIFACT_LIB_DIR/jopt-simple-4.6.jar" ]]; then - echo "jopt jar is missing from $ARTIFACT_LIB_DIR, trying to download from maven central (License: MIT License)" - curl -o "$ARTIFACT_LIB_DIR/jopt-simple-4.6.jar" https://repo1.maven.org/maven2/net/sf/jopt-simple/jopt-simple/4.6/jopt-simple-4.6.jar - fi - - OZONE_CLASSNAME=org.apache.hadoop.ozone.genesis.Genesis - OZONE_RUN_ARTIFACT_NAME="ozone-tools" - ;; getconf) OZONE_CLASSNAME=org.apache.hadoop.ozone.conf.OzoneGetConf; OZONE_RUN_ARTIFACT_NAME="ozone-tools" diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index 728ded031e4..b6ef8651b2e 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.time.Duration; -import java.util.Arrays; import java.util.Collections; import java.util.HashSet; import java.util.List; @@ -102,15 +101,13 @@ public static FailureService of(String serviceName) { @SuppressWarnings("parameternumber") public MiniOzoneChaosCluster(OzoneConfiguration conf, - List ozoneManagers, List scms, - List hddsDatanodes, String omServiceID, - String scmServiceId, String clusterPath, + OMHAService omService, SCMHAService scmService, + List hddsDatanodes, String clusterPath, Set> clazzes) { - super(conf, ozoneManagers, scms, hddsDatanodes, omServiceID, scmServiceId, - clusterPath); + super(conf, omService, scmService, hddsDatanodes, clusterPath, null); this.numDatanodes = getHddsDatanodes().size(); - this.numOzoneManagers = ozoneManagers.size(); - this.numStorageContainerManagers = scms.size(); + this.numOzoneManagers = omService.getServices().size(); + this.numStorageContainerManagers = scmService.getServices().size(); this.failedOmSet = new HashSet<>(); this.failedDnSet = new HashSet<>(); @@ -232,7 +229,7 @@ public Builder addFailures(Class clazz) { protected void initializeConfiguration() throws IOException { super.initializeConfiguration(); - OzoneClientConfig clientConfig =new OzoneClientConfig(); + OzoneClientConfig clientConfig = new OzoneClientConfig(); clientConfig.setStreamBufferFlushSize(8 * 1024 * 1024); clientConfig.setStreamBufferMaxSize(16 * 1024 * 1024); clientConfig.setStreamBufferSize(4 * 1024); @@ -304,33 +301,21 @@ public MiniOzoneChaosCluster build() throws IOException { initOMRatisConf(); } - List omList; - List scmList; + SCMHAService scmService; + OMHAService omService; try { - if (numOfSCMs > 1) { - scmList = createSCMService(); - } else { - StorageContainerManager scm = createSCM(); - scm.start(); - scmList = Arrays.asList(scm); - } - if (numOfOMs > 1) { - omList = createOMService(); - } else { - OzoneManager om = createOM(); - om.start(); - omList = Arrays.asList(om); - } + scmService = createSCMService(); + omService = createOMService(); } catch (AuthenticationException ex) { throw new IOException("Unable to build MiniOzoneCluster. 
", ex); } final List hddsDatanodes = createHddsDatanodes( - scmList, null); + scmService.getActiveServices(), null); MiniOzoneChaosCluster cluster = - new MiniOzoneChaosCluster(conf, omList, scmList, hddsDatanodes, - omServiceId, scmServiceId, path, clazzes); + new MiniOzoneChaosCluster(conf, omService, scmService, hddsDatanodes, + path, clazzes); if (startDataNodes) { cluster.startHddsDatanodes(); @@ -346,7 +331,7 @@ public static int getNumberOfOmToFail() { public Set omToFail() { int numNodesToFail = getNumberOfOmToFail(); - if (failedOmSet.size() >= numOzoneManagers/2) { + if (failedOmSet.size() >= numOzoneManagers / 2) { return Collections.emptySet(); } @@ -374,7 +359,7 @@ public void restartOzoneManager(OzoneManager om, boolean waitForOM) // Should the selected node be stopped or started. public boolean shouldStopOm() { - if (failedOmSet.size() >= numOzoneManagers/2) { + if (failedOmSet.size() >= numOzoneManagers / 2) { return false; } return RandomUtils.nextBoolean(); @@ -422,7 +407,7 @@ public static int getNumberOfScmToFail() { public Set scmToFail() { int numNodesToFail = getNumberOfScmToFail(); - if (failedScmSet.size() >= numStorageContainerManagers/2) { + if (failedScmSet.size() >= numStorageContainerManagers / 2) { return Collections.emptySet(); } @@ -449,7 +434,7 @@ public void restartStorageContainerManager(StorageContainerManager scm, // Should the selected node be stopped or started. public boolean shouldStopScm() { - if (failedScmSet.size() >= numStorageContainerManagers/2) { + if (failedScmSet.size() >= numStorageContainerManagers / 2) { return false; } return RandomUtils.nextBoolean(); diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java index 7e78e0f3c6f..f9c7fd0ec4c 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java @@ -58,7 +58,7 @@ public class MiniOzoneLoadGenerator { this.conf = conf; this.omServiceID = omServiceId; - for(Class clazz : loadGeneratorClazzes) { + for (Class clazz : loadGeneratorClazzes) { addLoads(clazz, buffer); } diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java index c6ccb3a8813..4a380feb6ac 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java @@ -195,7 +195,7 @@ void doPostOp() throws IOException { @Override public String toString() { return super.toString() + " " - + (readDir ? "readDirectory": "writeDirectory"); + + (readDir ? 
"readDirectory" : "writeDirectory"); } } diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index e106286610d..826b902ed03 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -42,7 +42,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test - org.apache.ozone hdds-server-scm diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index eb1b0b1d420..04af12becb6 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -119,16 +119,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> junit-platform-launcher test - - org.openjdk.jmh - jmh-core - test - - - org.openjdk.jmh - jmh-generator-annprocess - test - org.mockito mockito-core diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index dc86044fc10..752962f6b87 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -177,7 +177,7 @@ public void testObjectStoreCreateWithO3fs() throws Exception { keys.add("/dir1/dir2"); keys.add("/dir1/dir2/dir3"); keys.add("/dir1/dir2/dir3/dir4/"); - for (int i=1; i <= 3; i++) { + for (int i = 1; i <= 3; i++) { int length = 10; String fileName = parentDir.concat("/file" + i + "/"); keys.add(fileName); @@ -190,7 +190,7 @@ public void testObjectStoreCreateWithO3fs() throws Exception { } // check - for (int i=1; i <= 3; i++) { + for (int i = 1; i <= 3; i++) { String fileName = parentDir.concat("/file" + i + "/"); Path p = new Path(fileName); Assert.assertTrue(o3fs.getFileStatus(p).isFile()); @@ -209,12 +209,12 @@ public void testObjectStoreCreateWithO3fs() throws Exception { Assert.assertTrue(result); // No Key should exist. - for(String key : keys) { + for (String key : keys) { checkPath(new Path(key)); } - for (int i=1; i <= 3; i++) { + for (int i = 1; i <= 3; i++) { int length = 10; String fileName = parentDir.concat("/file" + i + "/"); OzoneOutputStream ozoneOutputStream = @@ -229,12 +229,12 @@ public void testObjectStoreCreateWithO3fs() throws Exception { o3fs.rename(new Path("/dir1"), new Path("/dest")); // No source Key should exist. - for(String key : keys) { + for (String key : keys) { checkPath(new Path(key)); } // check dest path. 
- for (int i=1; i <= 3; i++) { + for (int i = 1; i <= 3; i++) { String fileName = "/dest/".concat(parentDir.concat("/file" + i + "/")); Path p = new Path(fileName); Assert.assertTrue(o3fs.getFileStatus(p).isFile()); @@ -467,7 +467,7 @@ private void checkPath(Path path) { private void checkAncestors(Path p) throws Exception { p = p.getParent(); - while(p.getParent() != null) { + while (p.getParent() != null) { FileStatus fileStatus = o3fs.getFileStatus(p); Assert.assertTrue(fileStatus.isDirectory()); p = p.getParent(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index 0e9c360771c..5c2e0cf3f1c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -523,7 +523,7 @@ public void testOzoneManagerLocatedFileStatusBlockOffsetsWithMultiBlockFile() assertEquals(0, blockLocations[0].getOffset()); assertEquals(blockSize, blockLocations[1].getOffset()); - assertEquals(2*blockSize, blockLocations[2].getOffset()); + assertEquals(2 * blockSize, blockLocations[2].getOffset()); assertEquals(blockSize, blockLocations[0].getLength()); assertEquals(blockSize, blockLocations[1].getLength()); assertEquals(837, blockLocations[2].getLength()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index 6491f88d69a..5393ffdb0ad 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -44,6 +44,7 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; @@ -156,6 +157,7 @@ private void init() throws Exception { conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); conf.setBoolean(OZONE_ACL_ENABLED, true); if (!bucketLayout.equals(BucketLayout.FILE_SYSTEM_OPTIMIZED)) { conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, @@ -205,7 +207,7 @@ public void cleanup() { for (FileStatus fileStatus : fileStatuses) { fs.delete(fileStatus.getPath(), true); } - } catch (IOException ex){ + } catch (IOException ex) { fail("Failed to cleanup files."); } } @@ -258,7 +260,7 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() fs.mkdirs(dir1); try (FSDataOutputStream outputStream1 = fs.create(dir1, false)) { fail("Should throw FileAlreadyExistsException"); - } catch (FileAlreadyExistsException fae){ + } catch (FileAlreadyExistsException fae) { // ignore as its expected } @@ -289,14 +291,14 @@ public void testCreateFileShouldCheckExistenceOfDirWithSameName() fileStatus.isDirectory()); // invalid sub directory - try{ + try { fs.getFileStatus(new Path("/d1/d2/d3/d4/key3/invalid")); fail("Should throw FileNotFoundException"); 
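Several of the integration tests touched by this patch (TestOzoneFileSystem above, and TestSCMContainerManagerMetrics, TestMultiRaftSetup and TestHDDSUpgrade further down) now set ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY to false before building their mini cluster, presumably to keep them running against a single, non-Ratis SCM. A minimal sketch of that recurring setup, using only identifiers that appear in these hunks (the class name and datanode count are illustrative, not part of the patch):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.MiniOzoneCluster;

/** Illustrative only; not part of this patch. */
public class ExampleScmHaOptOut {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Same opt-out the tests in this patch add: run against a single, non-Ratis SCM.
    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false);
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(3)   // illustrative datanode count
        .build();
    try {
      cluster.waitForClusterToBeReady();
      // ... exercise the cluster as the individual tests do ...
    } finally {
      cluster.shutdown();
    }
  }
}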
} catch (FileNotFoundException fnfe) { // ignore as its expected } // invalid file name - try{ + try { fs.getFileStatus(new Path("/d1/d2/d3/d4/invalidkey")); fail("Should throw FileNotFoundException"); } catch (FileNotFoundException fnfe) { @@ -343,10 +345,10 @@ public void testCreateWithInvalidPaths() throws Exception { } private void checkInvalidPath(Path path) throws Exception { - try{ + try { LambdaTestUtils.intercept(InvalidPathException.class, "Invalid path Name", () -> fs.create(path, false)); - } catch (AssertionError e){ + } catch (AssertionError e) { fail("testCreateWithInvalidPaths failed for path" + path); } } @@ -415,7 +417,7 @@ public void testRecursiveDelete() throws Exception { Path grandparent = new Path("/gdir1"); for (int i = 1; i <= 10; i++) { - Path parent = new Path(grandparent, "pdir" +i); + Path parent = new Path(grandparent, "pdir" + i); Path child = new Path(parent, "child"); ContractTestUtils.touch(fs, child); } @@ -444,7 +446,7 @@ public void testRecursiveDelete() throws Exception { checkPath(grandparent); for (int i = 1; i <= 10; i++) { - Path parent = new Path(grandparent, "dir" +i); + Path parent = new Path(grandparent, "dir" + i); Path child = new Path(parent, "child"); checkPath(parent); checkPath(child); @@ -454,8 +456,8 @@ public void testRecursiveDelete() throws Exception { Path level0 = new Path("/level0"); for (int i = 1; i <= 3; i++) { - Path level1 = new Path(level0, "level" +i); - Path level2 = new Path(level1, "level" +i); + Path level1 = new Path(level0, "level" + i); + Path level2 = new Path(level1, "level" + i); Path level1File = new Path(level1, "file1"); Path level2File = new Path(level2, "file1"); ContractTestUtils.touch(fs, level1File); @@ -464,8 +466,8 @@ public void testRecursiveDelete() throws Exception { // Delete at sub directory level. 
for (int i = 1; i <= 3; i++) { - Path level1 = new Path(level0, "level" +i); - Path level2 = new Path(level1, "level" +i); + Path level1 = new Path(level0, "level" + i); + Path level2 = new Path(level1, "level" + i); fs.delete(level2, true); fs.delete(level1, true); } @@ -478,8 +480,8 @@ public void testRecursiveDelete() throws Exception { checkPath(level0); for (int i = 1; i <= 3; i++) { - Path level1 = new Path(level0, "level" +i); - Path level2 = new Path(level1, "level" +i); + Path level1 = new Path(level0, "level" + i); + Path level2 = new Path(level1, "level" + i); Path level1File = new Path(level1, "file1"); Path level2File = new Path(level2, "file1"); checkPath(level1); @@ -589,9 +591,9 @@ public void testListStatusWithIntermediateDir() throws Exception { // Wait until the filestatus is updated if (!enabledFileSystemPaths) { - GenericTestUtils.waitFor(()-> { + GenericTestUtils.waitFor(() -> { try { - return fs.listStatus(parent).length!=0; + return fs.listStatus(parent).length != 0; } catch (IOException e) { LOG.error("listStatus() Failed", e); Assert.fail("listStatus() Failed"); @@ -642,7 +644,7 @@ public void testListStatusOnLargeDirectory() throws Exception { deleteRootDir(); // cleanup Set paths = new TreeSet<>(); int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2; - for(int i = 0; i < numDirs; i++) { + for (int i = 0; i < numDirs; i++) { Path p = new Path(root, String.valueOf(i)); fs.mkdirs(p); paths.add(p.getName()); @@ -675,7 +677,7 @@ public void testListStatusOnLargeDirectory() throws Exception { "Total directories listed do not match the existing directories", numDirs, fileStatuses.length); - for (int i=0; i < numDirs; i++) { + for (int i = 0; i < numDirs; i++) { assertTrue(paths.contains(fileStatuses[i].getPath().getName())); } } @@ -1276,7 +1278,7 @@ public void testTrash() throws Exception { Path trashPath = new Path(userTrashCurrent, testKeyName); // Wait until the TrashEmptier purges the key - GenericTestUtils.waitFor(()-> { + GenericTestUtils.waitFor(() -> { try { return !o3fs.exists(trashPath); } catch (IOException e) { @@ -1290,9 +1292,9 @@ public void testTrash() throws Exception { Assert.assertEquals(1, fs.listStatus(userTrash).length); // wait for deletion of checkpoint dir - GenericTestUtils.waitFor(()-> { + GenericTestUtils.waitFor(() -> { try { - return o3fs.listStatus(userTrash).length==0; + return o3fs.listStatus(userTrash).length == 0; } catch (IOException e) { LOG.error("Delete from Trash Failed", e); Assert.fail("Delete from Trash Failed"); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java index b9fbde6ffd5..41539625ddb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl; +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClientFactory; @@ -167,7 +167,7 @@ public static void shutdown() { * @return the leader OM's RPC address in the MiniOzoneHACluster */ private String 
getLeaderOMNodeAddr() { - MiniOzoneOMHAClusterImpl haCluster = (MiniOzoneOMHAClusterImpl) cluster; + MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster; OzoneManager omLeader = haCluster.getOMLeader(); Assert.assertNotNull("There should be a leader OM at this point.", omLeader); @@ -188,7 +188,7 @@ private String getLeaderOMNodeAddr() { */ private String getHostFromAddress(String addr) { Optional hostOptional = getHostName(addr); - assert(hostOptional.isPresent()); + assert (hostOptional.isPresent()); return hostOptional.get(); } @@ -199,7 +199,7 @@ private String getHostFromAddress(String addr) { */ private int getPortFromAddress(String addr) { OptionalInt portOptional = getHostPort(addr); - assert(portOptional.isPresent()); + assert (portOptional.isPresent()); return portOptional.getAsInt(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java index 96b461f1f84..de731612023 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java @@ -191,7 +191,7 @@ public static void initClusterAndEnv() throws IOException, conf = new OzoneConfiguration(); conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL); - conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL/2); + conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); if (isBucketFSOptimized) { bucketLayout = BucketLayout.FILE_SYSTEM_OPTIMIZED; @@ -546,7 +546,7 @@ public void testListStatusOnLargeDirectory() throws Exception { Path root = new Path("/" + volumeName + "/" + bucketName); Set paths = new TreeSet<>(); int numDirs = LISTING_PAGE_SIZE + LISTING_PAGE_SIZE / 2; - for(int i = 0; i < numDirs; i++) { + for (int i = 0; i < numDirs; i++) { Path p = new Path(root, String.valueOf(i)); fs.mkdirs(p); paths.add(p.getName()); @@ -557,12 +557,12 @@ public void testListStatusOnLargeDirectory() throws Exception { "Total directories listed do not match the existing directories", numDirs, fileStatuses.length); - for (int i=0; i < numDirs; i++) { + for (int i = 0; i < numDirs; i++) { Assert.assertTrue(paths.contains(fileStatuses[i].getPath().getName())); } // Cleanup - for(int i = 0; i < numDirs; i++) { + for (int i = 0; i < numDirs; i++) { Path p = new Path(root, String.valueOf(i)); fs.delete(p, true); } @@ -1362,7 +1362,7 @@ public void testTrash() throws Exception { // Wait until the TrashEmptier purges the keys - GenericTestUtils.waitFor(()-> { + GenericTestUtils.waitFor(() -> { try { return !ofs.exists(trashPath) && !ofs.exists(trashPath2); } catch (IOException e) { @@ -1372,7 +1372,7 @@ public void testTrash() throws Exception { } }, 1000, 180000); - if (isBucketFSOptimized){ + if (isBucketFSOptimized) { Assert.assertTrue(getOMMetrics() .getNumTrashAtomicDirRenames() > prevNumTrashAtomicDirRenames); } else { @@ -1385,10 +1385,10 @@ public void testTrash() throws Exception { } // wait for deletion of checkpoint dir - GenericTestUtils.waitFor(()-> { + GenericTestUtils.waitFor(() -> { try { - return ofs.listStatus(userTrash).length==0 && - ofs.listStatus(userTrash2).length==0; + return ofs.listStatus(userTrash).length == 0 && + 
ofs.listStatus(userTrash2).length == 0; } catch (IOException e) { LOG.error("Delete from Trash Failed", e); Assert.fail("Delete from Trash Failed"); @@ -1397,7 +1397,7 @@ public void testTrash() throws Exception { }, 1000, 120000); // This condition should succeed once the checkpoint directory is deleted - if(isBucketFSOptimized){ + if (isBucketFSOptimized) { GenericTestUtils.waitFor( () -> getOMMetrics().getNumTrashAtomicDirDeletes() > prevNumTrashAtomicDirDeletes, 100, 180000); @@ -1444,7 +1444,7 @@ private void checkInvalidPath(Path path) throws Exception { @Test public void testRenameFile() throws Exception { final String dir = "/dir" + new Random().nextInt(1000); - Path dirPath = new Path(getBucketPath() +dir); + Path dirPath = new Path(getBucketPath() + dir); getFs().mkdirs(dirPath); Path file1Source = new Path(getBucketPath() + dir @@ -1466,7 +1466,7 @@ public void testRenameFile() throws Exception { @Test public void testRenameFileToDir() throws Exception { final String dir = "/dir" + new Random().nextInt(1000); - Path dirPath = new Path(getBucketPath() +dir); + Path dirPath = new Path(getBucketPath() + dir); getFs().mkdirs(dirPath); Path file1Destin = new Path(getBucketPath() + dir + "/file1"); @@ -1531,11 +1531,11 @@ public void testRenameDirToItsOwnSubDir() throws Exception { final Path sourceRoot = new Path(getBucketPath() + root); LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1); // rename should fail and return false - try{ + try { getFs().rename(sourceRoot, subDir1); fail("Should throw exception : Cannot rename a directory to" + " its own subdirectory"); - } catch (IllegalArgumentException e){ + } catch (IllegalArgumentException e) { //expected } } @@ -1560,7 +1560,7 @@ public void testRenameDestinationParentDoesntExist() throws Exception { try { getFs().rename(dir2SourcePath, destinPath); fail("Should fail as parent of dst does not exist!"); - } catch (FileNotFoundException fnfe){ + } catch (FileNotFoundException fnfe) { //expected } // (b) parent of dst is a file. /root_dir/file1/c @@ -1568,10 +1568,10 @@ public void testRenameDestinationParentDoesntExist() throws Exception { ContractTestUtils.touch(getFs(), filePath); Path newDestinPath = new Path(filePath, "c"); // rename shouldthrow exception - try{ + try { getFs().rename(dir2SourcePath, newDestinPath); fail("Should fail as parent of dst is a file!"); - } catch (IOException e){ + } catch (IOException e) { //expected } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCpWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCpWithFSO.java new file mode 100644 index 00000000000..333ef18f5f0 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCpWithFSO.java @@ -0,0 +1,65 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.fs.ozone.contract; + +import java.io.IOException; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; + +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup; + + +/** + * Contract test suite covering S3A integration with DistCp. + * Uses the block output stream, buffered to disk. This is the + * recommended output mechanism for DistCP due to its scalability. + * This test suite runs the server in File System Optimized mode. + *

+ * Note: It isn't possible to convert this into a parameterized test due to + * unrelated failures occurring while trying to handle directories with names + * containing '[' and ']' characters. + */ +public class ITestOzoneContractDistCpWithFSO + extends AbstractContractDistCpTest { + + @BeforeClass + public static void createCluster() throws IOException { + OzoneContract.createCluster(true); + } + + @AfterClass + public static void teardownCluster() throws IOException { + OzoneContract.destroyCluster(); + } + + @Override + protected OzoneContract createContract(Configuration conf) { + return new OzoneContract(conf); + } + + @Override + protected void deleteTestDirInTeardown() throws IOException { + super.deleteTestDirInTeardown(); + cleanup("TEARDOWN", getLocalFS(), getLocalDir()); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java index 0fc23c36087..f7858d1e2a2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractUtils.java @@ -27,7 +27,7 @@ */ public final class ITestOzoneContractUtils { - private ITestOzoneContractUtils(){} + private ITestOzoneContractUtils() { } private static List fsoCombinations = Arrays.asList(new Object[] { // FSO configuration is a cluster level server side configuration. @@ -47,7 +47,7 @@ private ITestOzoneContractUtils(){} // and old buckets will be operated on }); - static List getFsoCombinations(){ + static List getFsoCombinations() { return fsoCombinations; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java index 56326b45273..784897aae75 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java @@ -66,10 +66,16 @@ public Path getTestPath() { return path; } - public static void initOzoneConfiguration(boolean fsoServer){ + public static void initOzoneConfiguration(boolean fsoServer) { fsOptimizedServer = fsoServer; } + public static void createCluster(boolean fsoServer) throws IOException { + // Set the flag to enable/disable FSO on server. + initOzoneConfiguration(fsoServer); + createCluster(); + } + public static void createCluster() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); DatanodeRatisServerConfig ratisServerConfig = @@ -86,12 +92,16 @@ public static void createCluster() throws IOException { conf.addResource(CONTRACT_XML); - if (fsOptimizedServer){ - conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, - true); + if (fsOptimizedServer) { + // Default bucket layout is set to FSO in case of FSO server. + conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, + OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED); + } else { + // Default bucket layout is set to LEGACY to support Hadoop compatible + // FS operations that are incompatible with OBS (default config value). 
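The new createCluster(boolean fsoServer) overload on OzoneContract lets each contract suite choose the default bucket layout of the shared mini cluster: FILE_SYSTEM_OPTIMIZED when the flag is true (as ITestOzoneContractDistCpWithFSO above requests), and LEGACY otherwise, since the shipped OBS default is incompatible with Hadoop-style FS operations. A sketch of a hypothetical suite that wants the LEGACY behaviour, following the same pattern; the class name is made up and the parent contract test class is elided:

package org.apache.hadoop.fs.ozone.contract;

import java.io.IOException;

import org.junit.AfterClass;
import org.junit.BeforeClass;

/** Hypothetical contract suite running against the LEGACY bucket layout. */
public class ITestOzoneContractExample /* extends the relevant AbstractContract*Test */ {

  @BeforeClass
  public static void createCluster() throws IOException {
    // false => OzoneContract sets OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT to LEGACY
    OzoneContract.createCluster(false);
  }

  @AfterClass
  public static void teardownCluster() throws IOException {
    OzoneContract.destroyCluster();
  }
}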
+ conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, + BucketLayout.LEGACY.name()); } - conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, - BucketLayout.LEGACY.name()); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(5).build(); try { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java index 99df78fb150..f3858224b33 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -70,6 +71,7 @@ public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HDDS_CONTAINER_REPORT_INTERVAL, "3000s"); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); cluster.waitForClusterToBeReady(); scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index d741117ab6f..c1bbcf44f38 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -59,7 +59,7 @@ public void init(int numDatanodes, int datanodePipelineLimit) cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes) - .setTotalPipelineNumLimit(numDatanodes + numDatanodes/3) + .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) .setHbInterval(2000) .setHbProcessorInterval(1000) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java index 8a8de67567e..b4d7270e9a2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestMultiRaftSetup.java @@ -63,6 +63,7 @@ public void init(int dnCount, OzoneConfiguration conf) throws Exception { pipelineDestroyTimeoutInMillis = 1000; conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, pipelineDestroyTimeoutInMillis, TimeUnit.MILLISECONDS); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); cluster.waitForClusterToBeReady(); scm = cluster.getStorageContainerManager(); nodeManager = scm.getScmNodeManager(); @@ -153,7 +154,7 @@ public void testMultiRaft() throws Exception { shutdown(); } private void assertNotSamePeers() { - 
nodeManager.getAllNodes().forEach((dn) ->{ + nodeManager.getAllNodes().forEach((dn) -> { Collection peers = nodeManager.getPeerList(dn); Assert.assertFalse(peers.contains(dn)); List trimList = nodeManager.getAllNodes(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java index cb868083d58..137a408ba31 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -174,7 +174,7 @@ public void testPipelineCloseWithOpenContainer() public void testPipelineCloseWithPipelineAction() throws Exception { List dns = ratisContainer.getPipeline().getNodes(); PipelineActionsFromDatanode - pipelineActionsFromDatanode = TestUtils + pipelineActionsFromDatanode = HddsTestUtils .getPipelineActionFromDatanode(dns.get(0), ratisContainer.getPipeline().getId()); // send closing action for pipeline diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java index 58c0c62c2eb..457e12afd59 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java @@ -61,7 +61,7 @@ public void init(int numDatanodes) throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(numDatanodes) - .setTotalPipelineNumLimit(numDatanodes + numDatanodes/3) + .setTotalPipelineNumLimit(numDatanodes + numDatanodes / 3) .setHbInterval(2000) .setHbProcessorInterval(1000) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java index 724d34c2d6a..4c97b51c14f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java @@ -86,10 +86,10 @@ public void testScmSafeMode() throws Exception { int datanodeCount = 6; setup(datanodeCount); - waitForRatis3NodePipelines(datanodeCount/3); + waitForRatis3NodePipelines(datanodeCount / 3); waitForRatis1NodePipelines(datanodeCount); - int totalPipelineCount = datanodeCount + (datanodeCount/3); + int totalPipelineCount = datanodeCount + (datanodeCount / 3); //Cluster is started successfully cluster.stop(); @@ -178,7 +178,7 @@ public void testScmSafeMode() throws Exception { }); 
waitForRatis1NodePipelines(datanodeCount); - waitForRatis3NodePipelines(datanodeCount/3); + waitForRatis3NodePipelines(datanodeCount / 3); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java index ecf12724a98..800992d52de 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java @@ -26,6 +26,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY_READONLY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.OPEN; import static org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature.INITIAL_VERSION; import static org.apache.hadoop.ozone.upgrade.InjectedUpgradeFinalizationExecutor.UpgradeTestInjectionPoints.AFTER_COMPLETE_FINALIZATION; @@ -146,6 +147,7 @@ public static void initClass() { conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.set(OZONE_DATANODE_PIPELINE_LIMIT, "1"); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, false); MiniOzoneCluster.Builder builder = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(NUM_DATA_NODES) @@ -187,7 +189,7 @@ public void shutdown() throws IOException, InterruptedException { * Some tests repeatedly modify the cluster. Helper function to reload the * latest SCM state. */ - private void loadSCMState(){ + private void loadSCMState() { scm = cluster.getStorageContainerManager(); scmContainerManager = scm.getContainerManager(); scmPipelineManager = scm.getPipelineManager(); @@ -501,7 +503,7 @@ private Boolean injectSCMFailureDuringSCMUpgrade() IOException { // For some tests this could get called in a different thread context. // We need to guard concurrent updates to the cluster. - synchronized(cluster) { + synchronized (cluster) { cluster.restartStorageContainerManager(true); loadSCMState(); } @@ -1088,7 +1090,7 @@ public void testFinalizationWithFailureInjectionHelper( // Verify that new pipeline can be created with upgraded datanodes. try { testPostUpgradePipelineCreation(); - } catch(SCMException e) { + } catch (SCMException e) { // If pipeline creation fails, make sure that there is a valid reason // for this i.e. all datanodes are already part of some pipeline. 
for (HddsDatanodeService dataNode : cluster.getHddsDatanodes()) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index dc200508e7f..b6d4b31ddd0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -63,7 +63,7 @@ static Builder newBuilder(OzoneConfiguration conf) { * @return MiniOzoneCluster builder */ static Builder newOMHABuilder(OzoneConfiguration conf) { - return new MiniOzoneOMHAClusterImpl.Builder(conf); + return new MiniOzoneHAClusterImpl.Builder(conf); } static Builder newHABuilder(OzoneConfiguration conf) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index d9b5b3c5dfb..919b9b2a209 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -44,7 +44,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; @@ -152,22 +152,6 @@ protected MiniOzoneClusterImpl(OzoneConfiguration conf, this.reconServer = reconServer; } - /** - * Creates a new MiniOzoneCluster without the OzoneManager. This is used by - * {@link MiniOzoneOMHAClusterImpl} for starting multiple OzoneManagers. - * - * @param conf - * @param scm - * @param hddsDatanodes - */ - MiniOzoneClusterImpl(OzoneConfiguration conf, StorageContainerManager scm, - List hddsDatanodes, ReconServer reconServer) { - this.conf = conf; - this.scm = scm; - this.hddsDatanodes = hddsDatanodes; - this.reconServer = reconServer; - } - /** * Creates a new MiniOzoneCluster without the OzoneManager and * StorageContainerManager. This is used by @@ -361,7 +345,7 @@ public void restartStorageContainerManager(boolean waitForDatanode) LOG.info("Restarting SCM in cluster " + this.getClass()); scm.stop(); scm.join(); - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); scm.start(); if (waitForDatanode) { waitForClusterToBeReady(); @@ -693,7 +677,7 @@ protected void initializeConfiguration() throws IOException { // In this way safemode exit will happen only when atleast we have one // pipeline. conf.setInt(HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, - numOfDatanodes >=3 ? 3 : 1); + numOfDatanodes >= 3 ? 
3 : 1); configureTrace(); } @@ -719,7 +703,7 @@ protected StorageContainerManager createSCM() scmStore = new SCMStorageConfig(conf); initializeScmStorage(scmStore); - StorageContainerManager scm = TestUtils.getScmSimple(conf); + StorageContainerManager scm = HddsTestUtils.getScmSimple(conf); HealthyPipelineSafeModeRule rule = scm.getScmSafeModeManager().getHealthyPipelineSafeModeRule(); if (rule != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java index 890462098a6..ab2405ab8cd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterProvider.java @@ -189,7 +189,7 @@ private void ensureNotShutdown() throws IOException { private Thread reapClusters() { Thread t = new Thread(() -> { - while(!shutdown || !expiredClusters.isEmpty()) { + while (!shutdown || !expiredClusters.isEmpty()) { try { // Why not just call take and wait forever until interrupt is // thrown? Inside MiniCluster.shutdown, there are places where it @@ -251,7 +251,7 @@ private Thread createClusters() { } private void destroyRemainingClusters() { - while(!clusters.isEmpty()) { + while (!clusters.isEmpty()) { try { MiniOzoneCluster cluster = clusters.poll(); if (cluster != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 9bfae2a66a9..556af06044d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -23,9 +23,10 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.hadoop.hdds.ExitManager; +import org.apache.hadoop.hdds.conf.ConfigurationTarget; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.CheckedConsumer; import org.apache.hadoop.hdds.scm.safemode.HealthyPipelineSafeModeRule; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -39,22 +40,30 @@ import org.apache.hadoop.ozone.recon.ReconServer; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ratis.util.IOUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.Closeable; import java.io.IOException; import java.net.BindException; +import java.net.ServerSocket; import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Random; +import java.util.PrimitiveIterator; +import java.util.Queue; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.function.Function; -import java.util.stream.Collectors; +import static java.util.Collections.singletonList; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION; 
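With MiniOzoneOMHAClusterImpl folded into MiniOzoneHAClusterImpl, newOMHABuilder now returns a MiniOzoneHAClusterImpl.Builder, and callers such as TestOzoneFsHAURLs cast the built cluster to MiniOzoneHAClusterImpl to reach HA-only helpers like getOMLeader(). A rough usage sketch; the builder setters for the service id and OM count are assumed from the existing MiniOzoneCluster.Builder API rather than shown in this hunk, and the service id value is illustrative:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
import org.apache.hadoop.ozone.om.OzoneManager;

/** Illustrative only; not part of this patch. */
public class ExampleOmHaUsage {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    MiniOzoneCluster cluster = MiniOzoneCluster.newOMHABuilder(conf)
        .setOMServiceId("omservice")   // assumed setter, illustrative id
        .setNumOfOzoneManagers(3)      // assumed setter
        .build();
    try {
      cluster.waitForClusterToBeReady();
      // The cast is the pattern TestOzoneFsHAURLs uses after this change.
      MiniOzoneHAClusterImpl haCluster = (MiniOzoneHAClusterImpl) cluster;
      OzoneManager leader = haCluster.getOMLeader();
      System.out.println("OM leader: " + leader);
    } finally {
      cluster.shutdown();
    }
  }
}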
+import static org.apache.hadoop.ozone.OzoneTestUtils.reservePorts; import static org.apache.hadoop.ozone.om.OmUpgradeConfig.ConfigStrings.OZONE_OM_INIT_DEFAULT_LAYOUT_VERSION; /** @@ -75,51 +84,27 @@ public class MiniOzoneHAClusterImpl extends MiniOzoneClusterImpl { private int waitForClusterToBeReadyTimeout = 120000; // 2 min - private static final Random RANDOM = new Random(); private static final int RATIS_RPC_TIMEOUT = 1000; // 1 second - private static final int NODE_FAILURE_TIMEOUT = 2000; // 2 seconds + public static final int NODE_FAILURE_TIMEOUT = 2000; // 2 seconds /** * Creates a new MiniOzoneCluster. * * @throws IOException if there is an I/O error */ - @SuppressWarnings("checkstyle:ParameterNumber") public MiniOzoneHAClusterImpl( OzoneConfiguration conf, - List activeOMList, - List inactiveOMList, - List activeSCMList, - List inactiveSCMList, + OMHAService omhaService, + SCMHAService scmhaService, List hddsDatanodes, - String omServiceId, - String scmServiceId, String clusterPath, ReconServer reconServer) { super(conf, hddsDatanodes, reconServer); - omhaService = - new OMHAService(activeOMList, inactiveOMList, omServiceId); - scmhaService = - new SCMHAService(activeSCMList, inactiveSCMList, scmServiceId); + this.omhaService = omhaService; + this.scmhaService = scmhaService; this.clusterMetaPath = clusterPath; } - /** - * Creates a new MiniOzoneCluster with all OMs active. - * This is used by MiniOzoneChaosCluster. - */ - protected MiniOzoneHAClusterImpl( - OzoneConfiguration conf, - List omList, - List scmList, - List hddsDatanodes, - String omServiceId, - String scmServiceId, - String clusterPath) { - this(conf, omList, null, scmList, null, hddsDatanodes, - omServiceId, scmServiceId, clusterPath, null); - } - @Override public String getOMServiceId() { return omhaService.getServiceId(); @@ -155,8 +140,8 @@ public boolean isOMActive(String omNodeId) { return omhaService.isServiceActive(omNodeId); } - public boolean isSCMActive(String scmNodeId) { - return scmhaService.isServiceActive(scmNodeId); + public Iterator getInactiveSCM() { + return scmhaService.inactiveServices(); } public StorageContainerManager getSCM(String scmNodeId) { @@ -275,7 +260,7 @@ public void restartStorageContainerManager( OzoneConfiguration scmConf = scm.getConfiguration(); shutdownStorageContainerManager(scm); scm.join(); - scm = TestUtils.getScmSimple(scmConf); + scm = HddsTestUtils.getScmSimple(scmConf); scmhaService.activate(scm); scm.start(); if (waitForSCM) { @@ -309,6 +294,13 @@ public void waitForSCMToBeReady() }, 1000, waitForClusterToBeReadyTimeout); } + @Override + public void shutdown() { + super.shutdown(); + omhaService.releasePorts(); + scmhaService.releasePorts(); + } + @Override public void stop() { for (OzoneManager ozoneManager : this.omhaService.getServices()) { @@ -343,6 +335,29 @@ public void stopOzoneManager(String omNodeId) { omhaService.deactivate(om); } + private static void configureOMPorts(ConfigurationTarget conf, + String omServiceId, String omNodeId, + ReservedPorts omPorts, ReservedPorts omRpcPorts) { + + String omAddrKey = ConfUtils.addKeySuffixes( + OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId); + String omHttpAddrKey = ConfUtils.addKeySuffixes( + OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId); + String omHttpsAddrKey = ConfUtils.addKeySuffixes( + OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId); + String omRatisPortKey = ConfUtils.addKeySuffixes( + OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNodeId); + + 
PrimitiveIterator.OfInt nodePorts = omPorts.assign(omNodeId); + PrimitiveIterator.OfInt rpcPorts = omRpcPorts.assign(omNodeId); + conf.set(omAddrKey, "127.0.0.1:" + rpcPorts.nextInt()); + conf.set(omHttpAddrKey, "127.0.0.1:" + nodePorts.nextInt()); + conf.set(omHttpsAddrKey, "127.0.0.1:" + nodePorts.nextInt()); + conf.setInt(omRatisPortKey, nodePorts.nextInt()); + + omRpcPorts.release(omNodeId); + } + /** * Builder for configuring the MiniOzoneCluster to run. */ @@ -356,6 +371,15 @@ public static class Builder extends MiniOzoneClusterImpl.Builder { private List activeSCMs = new ArrayList<>(); private List inactiveSCMs = new ArrayList<>(); + // These port reservations are for servers started when the component + // (OM or SCM) is started. These are Ratis, HTTP and HTTPS. We also have + // another set of ports for RPC endpoints, which are started as soon as + // the component is created (in methods called by OzoneManager and + // StorageContainerManager constructors respectively). So we need to manage + // them separately, see initOMHAConfig() and initSCMHAConfig(). + private final ReservedPorts omPorts = new ReservedPorts(3); + private final ReservedPorts scmPorts = new ReservedPorts(3); + /** * Creates a new Builder. * @@ -365,14 +389,6 @@ public Builder(OzoneConfiguration conf) { super(conf); } - public List getActiveOMs() { - return activeOMs; - } - - public List getInactiveOMs() { - return inactiveOMs; - } - @Override public MiniOzoneCluster build() throws IOException { if (numOfActiveOMs > numOfOMs) { @@ -398,11 +414,12 @@ public MiniOzoneCluster build() throws IOException { DefaultMetricsSystem.setMiniClusterMode(true); initializeConfiguration(); initOMRatisConf(); - StorageContainerManager scm; + SCMHAService scmService; + OMHAService omService; ReconServer reconServer = null; try { - createSCMService(); - createOMService(); + scmService = createSCMService(); + omService = createOMService(); if (includeRecon) { configureRecon(); reconServer = new ReconServer(); @@ -413,11 +430,10 @@ public MiniOzoneCluster build() throws IOException { } final List hddsDatanodes = createHddsDatanodes( - activeSCMs, reconServer); + scmService.getActiveServices(), reconServer); MiniOzoneHAClusterImpl cluster = new MiniOzoneHAClusterImpl(conf, - activeOMs, inactiveOMs, activeSCMs, inactiveSCMs, - hddsDatanodes, omServiceId, scmServiceId, path, reconServer); + omService, scmService, hddsDatanodes, path, reconServer); if (startDataNodes) { cluster.startHddsDatanodes(); @@ -458,20 +474,23 @@ protected void initOMRatisConf() { /** * Start OM service with multiple OMs. 
*/ - protected List createOMService() throws IOException, + protected OMHAService createOMService() throws IOException, AuthenticationException { + if (omServiceId == null) { + OzoneManager om = createOM(); + om.start(); + return new OMHAService(singletonList(om), null, null, null); + } List omList = Lists.newArrayList(); int retryCount = 0; - int basePort; while (true) { try { - basePort = 10000 + RANDOM.nextInt(1000) * 4; - initOMHAConfig(basePort); + initOMHAConfig(); - for (int i = 1; i<= numOfOMs; i++) { + for (int i = 1; i <= numOfOMs; i++) { // Set nodeId String nodeId = OM_NODE_ID_PREFIX + i; OzoneConfiguration config = new OzoneConfiguration(conf); @@ -520,28 +539,32 @@ protected List createOMService() throws IOException, omList.clear(); ++retryCount; LOG.info("MiniOzoneHACluster port conflicts, retried {} times", - retryCount); + retryCount, e); } } - return omList; + return new OMHAService(activeOMs, inactiveOMs, omServiceId, omPorts); } /** * Start OM service with multiple OMs. */ - protected List createSCMService() + protected SCMHAService createSCMService() throws IOException, AuthenticationException { + if (scmServiceId == null) { + StorageContainerManager scm = createSCM(); + scm.start(); + return new SCMHAService(singletonList(scm), null, null, null); + } + List scmList = Lists.newArrayList(); int retryCount = 0; - int basePort = 12000; while (true) { try { - basePort = 12000 + RANDOM.nextInt(1000) * 4; - initSCMHAConfig(basePort); + initSCMHAConfig(); - for (int i = 1; i<= numOfSCMs; i++) { + for (int i = 1; i <= numOfSCMs; i++) { // Set nodeId String nodeId = SCM_NODE_ID_PREFIX + i; String metaDirPath = path + "/" + nodeId; @@ -560,7 +583,7 @@ protected List createSCMService() } else { StorageContainerManager.scmBootstrap(scmConfig); } - StorageContainerManager scm = TestUtils.getScmSimple(scmConfig); + StorageContainerManager scm = HddsTestUtils.getScmSimple(scmConfig); HealthyPipelineSafeModeRule rule = scm.getScmSafeModeManager().getHealthyPipelineSafeModeRule(); if (rule != null) { @@ -575,11 +598,11 @@ protected List createSCMService() scm.start(); activeSCMs.add(scm); LOG.info("Started SCM RPC server at {}", - scm.getClientProtocolServer()); + scm.getClientRpcAddress()); } else { inactiveSCMs.add(scm); LOG.info("Intialized SCM at {}. This SCM is currently " - + "inactive (not running).", scm.getClientProtocolServer()); + + "inactive (not running).", scm.getClientRpcAddress()); } } @@ -590,21 +613,22 @@ protected List createSCMService() scm.stop(); scm.join(); LOG.info("Stopping StorageContainerManager server at {}", - scm.getClientProtocolServer()); + scm.getClientRpcAddress()); } scmList.clear(); ++retryCount; LOG.info("MiniOzoneHACluster port conflicts, retried {} times", - retryCount); + retryCount, e); } } - return scmList; + + return new SCMHAService(activeSCMs, inactiveSCMs, scmServiceId, scmPorts); } /** * Initialize HA related configurations. */ - private void initSCMHAConfig(int basePort) throws IOException { + private void initSCMHAConfig() { // Set configurations required for starting OM HA service, because that // is the serviceID being passed to start Ozone HA cluster. 
// Here setting internal service and OZONE_OM_SERVICE_IDS_KEY, in this @@ -616,11 +640,14 @@ private void initSCMHAConfig(int basePort) throws IOException { StringBuilder scmNodesKeyValue = new StringBuilder(); StringBuilder scmNames = new StringBuilder(); - int port = basePort; + scmPorts.reserve(numOfSCMs); + ReservedPorts scmRpcPorts = new ReservedPorts(4); + scmRpcPorts.reserve(numOfSCMs); - for (int i = 1; i <= numOfSCMs; i++, port+=10) { + for (int i = 1; i <= numOfSCMs; i++) { String scmNodeId = SCM_NODE_ID_PREFIX + i; scmNodesKeyValue.append(",").append(scmNodeId); + String scmAddrKey = ConfUtils.addKeySuffixes( ScmConfigKeys.OZONE_SCM_ADDRESS_KEY, scmServiceId, scmNodeId); String scmHttpAddrKey = ConfUtils.addKeySuffixes( @@ -641,18 +668,32 @@ private void initSCMHAConfig(int basePort) throws IOException { String scmGrpcPortKey = ConfUtils.addKeySuffixes( ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY, scmServiceId, scmNodeId); + PrimitiveIterator.OfInt nodePorts = scmPorts.assign(scmNodeId); + PrimitiveIterator.OfInt rpcPorts = scmRpcPorts.assign(scmNodeId); conf.set(scmAddrKey, "127.0.0.1"); - conf.set(scmHttpAddrKey, "127.0.0.1:" + (port + 2)); - conf.set(scmHttpsAddrKey, "127.0.0.1:" + (port + 3)); - conf.setInt(scmRatisPortKey, port + 4); - //conf.setInt("ozone.scm.ha.ratis.bind.port", port + 4); - conf.set(dnPortKey, "127.0.0.1:" + (port + 5)); - conf.set(blockClientKey, "127.0.0.1:" + (port + 6)); - conf.set(ssClientKey, "127.0.0.1:" + (port + 7)); - conf.setInt(scmGrpcPortKey, port + 8); - scmNames.append(",").append("localhost:" + (port + 5)); - conf.set(ScmConfigKeys. - OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:" + (port + 6)); + conf.set(scmHttpAddrKey, "127.0.0.1:" + nodePorts.nextInt()); + conf.set(scmHttpsAddrKey, "127.0.0.1:" + nodePorts.nextInt()); + + int ratisPort = nodePorts.nextInt(); + conf.setInt(scmRatisPortKey, ratisPort); + //conf.setInt("ozone.scm.ha.ratis.bind.port", ratisPort); + + int dnPort = rpcPorts.nextInt(); + conf.set(dnPortKey, "127.0.0.1:" + dnPort); + scmNames.append(",localhost:").append(dnPort); + + conf.set(ssClientKey, "127.0.0.1:" + rpcPorts.nextInt()); + conf.setInt(scmGrpcPortKey, rpcPorts.nextInt()); + + int blockPort = rpcPorts.nextInt(); + conf.set(blockClientKey, "127.0.0.1:" + blockPort); + conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, + "127.0.0.1:" + blockPort); + + if (i <= numOfActiveSCMs) { + scmPorts.release(scmNodeId); + } + scmRpcPorts.release(scmNodeId); } conf.set(scmNodesKey, scmNodesKeyValue.substring(1)); @@ -662,7 +703,7 @@ private void initSCMHAConfig(int basePort) throws IOException { /** * Initialize HA related configurations. */ - private void initOMHAConfig(int basePort) throws IOException { + private void initOMHAConfig() { // Set configurations required for starting OM HA service, because that // is the serviceID being passed to start Ozone HA cluster. 
// Here setting internal service and OZONE_OM_SERVICE_IDS_KEY, in this @@ -673,25 +714,19 @@ private void initOMHAConfig(int basePort) throws IOException { OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId); List omNodeIds = new ArrayList<>(); - int port = basePort; + omPorts.reserve(numOfOMs); + ReservedPorts omRpcPorts = new ReservedPorts(1); + omRpcPorts.reserve(numOfOMs); - for (int i = 1; i <= numOfOMs; i++, port+=6) { + for (int i = 1; i <= numOfOMs; i++) { String omNodeId = OM_NODE_ID_PREFIX + i; omNodeIds.add(omNodeId); - String omAddrKey = ConfUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId); - String omHttpAddrKey = ConfUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId); - String omHttpsAddrKey = ConfUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId); - String omRatisPortKey = ConfUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNodeId); - - conf.set(omAddrKey, "127.0.0.1:" + port); - conf.set(omHttpAddrKey, "127.0.0.1:" + (port + 2)); - conf.set(omHttpsAddrKey, "127.0.0.1:" + (port + 3)); - conf.setInt(omRatisPortKey, port + 4); + configureOMPorts(conf, omServiceId, omNodeId, omPorts, omRpcPorts); + + if (i <= numOfActiveOMs) { + omPorts.release(omNodeId); + } } conf.set(omNodesKey, String.join(",", omNodeIds)); @@ -729,9 +764,8 @@ public void bootstrapOzoneManager(String omNodeId, while (true) { try { - List portSet = getFreePortList(4); OzoneConfiguration newConf = addNewOMToConfig(getOMServiceId(), - omNodeId, portSet); + omNodeId); if (updateConfigs) { updateOMConfigs(newConf); @@ -755,7 +789,7 @@ public void bootstrapOzoneManager(String omNodeId, e.getCause() instanceof BindException) { ++retryCount; LOG.info("MiniOzoneHACluster port conflicts, retried {} times", - retryCount); + retryCount, e); } else { throw e; } @@ -772,30 +806,19 @@ public void bootstrapOzoneManager(String omNodeId, * Set the configs for new OMs. 
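The retry loops in createOMService(), createSCMService() and bootstrapOzoneManager() above now pass the caught BindException as the last logger argument. With SLF4J, a trailing Throwable that has no matching placeholder is logged with its stack trace, so the conflicting bind is no longer silently swallowed. A minimal sketch of that behavior (assumes an SLF4J API and binding on the classpath; class and variable names are illustrative, not part of the patch):

```java
import java.net.BindException;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public final class RetryLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(RetryLoggingSketch.class);

  public static void main(String[] args) {
    int retryCount = 3;
    Exception e = new BindException("Address already in use");
    // One "{}" placeholder, two arguments: SLF4J fills the placeholder with
    // retryCount and, because the trailing argument is a Throwable, also
    // prints its stack trace.
    LOG.info("MiniOzoneHACluster port conflicts, retried {} times",
        retryCount, e);
  }
}
```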
*/ private OzoneConfiguration addNewOMToConfig(String omServiceId, - String omNodeId, List portList) { + String omNodeId) { + + ReservedPorts omPorts = omhaService.getPorts(); + omPorts.reserve(1); + ReservedPorts omRpcPorts = new ReservedPorts(1); + omRpcPorts.reserve(1); OzoneConfiguration newConf = new OzoneConfiguration(getConf()); + configureOMPorts(newConf, omServiceId, omNodeId, omPorts, omRpcPorts); + String omNodesKey = ConfUtils.addKeySuffixes( OMConfigKeys.OZONE_OM_NODES_KEY, omServiceId); - StringBuilder omNodesKeyValue = new StringBuilder(); - omNodesKeyValue.append(newConf.get(omNodesKey)) - .append(",").append(omNodeId); - - String omAddrKey = ConfUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId); - String omHttpAddrKey = ConfUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, omServiceId, omNodeId); - String omHttpsAddrKey = ConfUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_HTTPS_ADDRESS_KEY, omServiceId, omNodeId); - String omRatisPortKey = ConfUtils.addKeySuffixes( - OMConfigKeys.OZONE_OM_RATIS_PORT_KEY, omServiceId, omNodeId); - - newConf.set(omAddrKey, "127.0.0.1:" + portList.get(0)); - newConf.set(omHttpAddrKey, "127.0.0.1:" + portList.get(1)); - newConf.set(omHttpsAddrKey, "127.0.0.1:" + portList.get(2)); - newConf.setInt(omRatisPortKey, portList.get(3)); - - newConf.set(omNodesKey, omNodesKeyValue.toString()); + newConf.set(omNodesKey, newConf.get(omNodesKey) + "," + omNodeId); return newConf; } @@ -838,9 +861,7 @@ private OzoneManager bootstrapNewOM(String nodeId, OzoneConfiguration newConf, ExitManagerForOM exitManager = new ExitManagerForOM(this, nodeId); om.setExitManagerForTesting(exitManager); omhaService.addInstance(om, false); - - om.start(); - omhaService.activate(om); + startInactiveOM(nodeId); return om; } @@ -902,6 +923,7 @@ static class MiniOzoneHAService { private List services; private String serviceId; private String serviceName; + private final ReservedPorts ports; // Active services s denote OM/SCM services which are up and running private List activeServices; @@ -911,9 +933,10 @@ static class MiniOzoneHAService { private Function serviceIdProvider; MiniOzoneHAService(String name, List activeList, - List inactiveList, String serviceId, - Function idProvider) { + List inactiveList, String serviceId, + ReservedPorts ports, Function idProvider) { this.serviceName = name; + this.ports = ports != null ? 
ports : new ReservedPorts(0); this.serviceMap = Maps.newHashMap(); this.serviceIdProvider = idProvider; if (activeList != null) { @@ -945,6 +968,10 @@ public List getServices() { return services; } + public void releasePorts() { + ports.releaseAll(); + } + public List getActiveServices() { return activeServices; } @@ -958,6 +985,8 @@ public void addInstance(Type t, boolean isActive) { serviceMap.put(serviceIdProvider.apply(t), t); if (isActive) { activeServices.add(t); + } else { + inactiveServices.add(t); } } @@ -975,6 +1004,10 @@ public boolean isServiceActive(String id) { return activeServices.contains(serviceMap.get(id)); } + public Iterator inactiveServices() { + return new ArrayList<>(inactiveServices).iterator(); + } + public Type getServiceByIndex(int index) { return this.services.get(index); } @@ -989,17 +1022,22 @@ public void startInactiveService(String id, if (!inactiveServices.contains(service)) { throw new IOException(serviceName + " is already active."); } else { + ports.release(id); serviceStarter.execute(service); activeServices.add(service); inactiveServices.remove(service); } } + + public ReservedPorts getPorts() { + return ports; + } } static class OMHAService extends MiniOzoneHAService { OMHAService(List activeList, List inactiveList, - String serviceId) { - super("OM", activeList, inactiveList, serviceId, + String serviceId, ReservedPorts omPorts) { + super("OM", activeList, inactiveList, serviceId, omPorts, OzoneManager::getOMNodeId); } } @@ -1007,10 +1045,10 @@ static class OMHAService extends MiniOzoneHAService { static class SCMHAService extends MiniOzoneHAService { SCMHAService(List activeList, - List inactiveList, - String serviceId) { + List inactiveList, + String serviceId, ReservedPorts scmPorts) { super("SCM", activeList, inactiveList, serviceId, - StorageContainerManager::getScmId); + scmPorts, StorageContainerManager::getSCMNodeId); } } @@ -1022,13 +1060,6 @@ public StorageContainerManager getStorageContainerManager() { return getStorageContainerManagers().get(0); } - private List getFreePortList(int size) { - return org.apache.ratis.util.NetUtils.createLocalServerAddress(size) - .stream() - .map(inetSocketAddress -> inetSocketAddress.getPort()) - .collect(Collectors.toList()); - } - private static final class ExitManagerForOM extends ExitManager { private MiniOzoneHAClusterImpl cluster; @@ -1055,4 +1086,77 @@ public void exitSystem(int status, String message, Logger log) throw new IOException(message); } } + + /** + * Reserves a number of ports for services. + */ + private static class ReservedPorts { + + private final Queue allPorts = new LinkedList<>(); + private final Map> assignedPorts = + new HashMap<>(); + private final int portsPerNode; + + ReservedPorts(int portsPerNode) { + this.portsPerNode = portsPerNode; + } + + /** + * Reserve {@code portsPerNode * nodes} ports by binding server sockets + * to random free ports. The sockets are kept open until + * {@link #release(String)} or {@link #releaseAll} is called. + */ + public void reserve(int nodes) { + Preconditions.checkState(allPorts.isEmpty()); + allPorts.addAll(reservePorts(portsPerNode * nodes)); + } + + /** + * Assign {@code portsPerNode} ports to a service identified by {@code id}. + * This set of ports should be released right before starting the service + * by calling {@link #release(String)}. 
+ * + * @return iterator of the ports assigned + */ + public PrimitiveIterator.OfInt assign(String id) { + Preconditions.checkState(allPorts.size() >= portsPerNode); + List nodePorts = new LinkedList<>(); + for (int i = 0; i < portsPerNode; i++) { + nodePorts.add(allPorts.remove()); + } + assignedPorts.put(id, nodePorts); + LOG.debug("assign ports for {}: {}", id, nodePorts); + + return nodePorts.stream().mapToInt(ServerSocket::getLocalPort).iterator(); + } + + /** + * Release the ports assigned to the service identified by {@code id}. + * + * This closes the server sockets, making the same ports available for + * the service. Note: there is a race condition with other processes + * running on the host, but that's OK since this is for tests. + * + * If no ports are assigned to the service, this is a no-op. + */ + public void release(String id) { + List ports = assignedPorts.remove(id); + LOG.debug("release ports for {}: {}", id, ports); + if (ports != null) { + IOUtils.cleanup(LOG, ports.toArray(new Closeable[0])); + } + } + + /** + * Release all reserved ports, assigned or not. + */ + public void releaseAll() { + IOUtils.cleanup(LOG, allPorts.toArray(new Closeable[0])); + allPorts.clear(); + + for (String id : new ArrayList<>(assignedPorts.keySet())) { + release(id); + } + } + } } \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneOMHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneOMHAClusterImpl.java deleted file mode 100644 index 2f2b7734148..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneOMHAClusterImpl.java +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.recon.ReconServer; -import org.apache.hadoop.security.authentication.client.AuthenticationException; - -import java.io.IOException; -import java.util.List; -import java.util.Collections; - -/** - * MiniOzoneOMHAClusterImpl creates a complete in-process Ozone cluster - * with OM HA suitable for running tests. The cluster consists of a set of - * OzoneManagers, StorageContainerManager and multiple DataNodes. - */ -public final class MiniOzoneOMHAClusterImpl extends MiniOzoneHAClusterImpl { - public static final int NODE_FAILURE_TIMEOUT = 2000; // 2 seconds - - /** - * Creates a new MiniOzoneOMHACluster. 
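The ReservedPorts helper above (backed by the OzoneTestUtils.reservePorts() addition later in this patch) keeps each reserved port held by an open ServerSocket until release() is called, just before the owning service binds the same port number. A standalone sketch of that reservation trick using only JDK classes (the class name and port count here are illustrative):

```java
import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.ServerSocket;
import java.util.ArrayList;
import java.util.List;

public final class PortReservationSketch {
  public static void main(String[] args) throws IOException {
    List<ServerSocket> held = new ArrayList<>();
    // "reserve": bind sockets to ephemeral ports and keep them open so no
    // other test process in the same run can grab those ports.
    for (int i = 0; i < 3; i++) {
      ServerSocket s = new ServerSocket();
      s.setReuseAddress(true);
      s.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 1);
      held.add(s);
      System.out.println("reserved 127.0.0.1:" + s.getLocalPort());
    }
    // "release": close one socket right before the real server binds the
    // same port number.
    ServerSocket first = held.remove(0);
    int port = first.getLocalPort();
    first.close();
    try (ServerSocket server = new ServerSocket()) {
      server.setReuseAddress(true);
      server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), port));
      System.out.println("re-bound " + port + " as the real server");
    }
  }
}
```

Closing and re-binding leaves the small race with unrelated processes that the release() javadoc above acknowledges, which is acceptable for a test-only mini cluster.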
- * - * @throws IOException if there is an I/O error - */ - @SuppressWarnings("checkstyle:ParameterNumber") - private MiniOzoneOMHAClusterImpl( - OzoneConfiguration conf, - List activeOMList, - List inactiveOMList, - StorageContainerManager scm, - List hddsDatanodes, - String omServiceId, - String metaPath, - ReconServer reconServer) { - super(conf, activeOMList, inactiveOMList, Collections.singletonList(scm), - null, hddsDatanodes, omServiceId, null, metaPath, reconServer); - } - - /** - * Builder for configuring the MiniOzoneCluster to run. - */ - public static class Builder extends MiniOzoneHAClusterImpl.Builder { - - /** - * Creates a new Builder. - * - * @param conf configuration - */ - public Builder(OzoneConfiguration conf) { - super(conf); - } - - @Override - public MiniOzoneCluster build() throws IOException { - if (numOfActiveOMs > numOfOMs) { - throw new IllegalArgumentException("Number of active OMs cannot be " + - "more than the total number of OMs"); - } - - // If num of ActiveOMs is not set, set it to numOfOMs. - if (numOfActiveOMs == ACTIVE_OMS_NOT_SET) { - numOfActiveOMs = numOfOMs; - } - - DefaultMetricsSystem.setMiniClusterMode(true); - initializeConfiguration(); - initOMRatisConf(); - StorageContainerManager scm; - ReconServer reconServer = null; - try { - scm = createSCM(); - scm.start(); - createOMService(); - if (includeRecon) { - configureRecon(); - reconServer = new ReconServer(); - reconServer.execute(new String[] {}); - } - } catch (AuthenticationException ex) { - throw new IOException("Unable to build MiniOzoneCluster. ", ex); - } - - final List hddsDatanodes = createHddsDatanodes( - Collections.singletonList(scm), reconServer); - - MiniOzoneClusterImpl cluster = new MiniOzoneOMHAClusterImpl(conf, - getActiveOMs(), getInactiveOMs(), scm, hddsDatanodes, - omServiceId, path, reconServer); - - if (startDataNodes) { - cluster.startHddsDatanodes(); - } - return cluster; - } - } -} \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java index 23ac5e104e4..dba97397805 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java @@ -17,7 +17,13 @@ */ package org.apache.hadoop.ozone; +import java.io.Closeable; import java.io.IOException; +import java.io.UncheckedIOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.ServerSocket; +import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hdds.client.BlockID; @@ -32,6 +38,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.ozone.test.LambdaTestUtils.VoidCallable; +import org.apache.ratis.util.IOUtils; import org.apache.ratis.util.function.CheckedConsumer; import org.junit.Assert; @@ -122,7 +129,7 @@ public static void closeAllContainers(EventPublisher eventPublisher, */ public static void performOperationOnKeyContainers( CheckedConsumer consumer, - List omKeyLocationInfoGroups) throws Exception{ + List omKeyLocationInfoGroups) throws Exception { for (OmKeyLocationInfoGroup omKeyLocationInfoGroup : omKeyLocationInfoGroups) { @@ -146,4 +153,20 @@ public static void expectOmException( Assert.assertEquals(code, ex.getResult()); } } + + public static List reservePorts(int count) { + List sockets = new ArrayList<>(count); + try { + for (int i 
= 0; i < count; i++) { + ServerSocket s = new ServerSocket(); + sockets.add(s); + s.setReuseAddress(true); + s.bind(new InetSocketAddress(InetAddress.getByName(null), 0), 1); + } + } catch (IOException e) { + IOUtils.cleanup(null, sockets.toArray(new Closeable[0])); + throw new UncheckedIOException(e); + } + return sockets; + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java index ac5b737ab3e..8663c7250b3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerBalancerOperations.java @@ -64,7 +64,7 @@ public static void setup() throws Exception { @AfterClass public static void cleanup() throws Exception { - if(cluster != null) { + if (cluster != null) { cluster.shutdown(); } } @@ -81,15 +81,17 @@ public void testContainerBalancerCLIOperations() throws Exception { boolean running = containerBalancerClient.getContainerBalancerStatus(); assertFalse(running); Optional threshold = Optional.of(0.1); - Optional idleiterations = Optional.of(10000); - Optional maxDatanodesRatioToInvolvePerIteration = Optional.of(1d); + Optional iterations = Optional.of(10000); + Optional maxDatanodesPercentageToInvolvePerIteration = + Optional.of(100); Optional maxSizeToMovePerIterationInGB = Optional.of(1L); Optional maxSizeEnteringTargetInGB = Optional.of(1L); Optional maxSizeLeavingSourceInGB = Optional.of(1L); - containerBalancerClient.startContainerBalancer(threshold, idleiterations, - maxDatanodesRatioToInvolvePerIteration, maxSizeToMovePerIterationInGB, - maxSizeEnteringTargetInGB, maxSizeLeavingSourceInGB); + containerBalancerClient.startContainerBalancer(threshold, iterations, + maxDatanodesPercentageToInvolvePerIteration, + maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, + maxSizeLeavingSourceInGB); running = containerBalancerClient.getContainerBalancerStatus(); assertTrue(running); @@ -98,15 +100,16 @@ public void testContainerBalancerCLIOperations() throws Exception { // modify this after balancer is fully completed try { Thread.sleep(100); - } catch (InterruptedException e) {} + } catch (InterruptedException e) { } running = containerBalancerClient.getContainerBalancerStatus(); assertFalse(running); // test normally start , and stop it before balance is completed - containerBalancerClient.startContainerBalancer(threshold, idleiterations, - maxDatanodesRatioToInvolvePerIteration, maxSizeToMovePerIterationInGB, - maxSizeEnteringTargetInGB, maxSizeLeavingSourceInGB); + containerBalancerClient.startContainerBalancer(threshold, iterations, + maxDatanodesPercentageToInvolvePerIteration, + maxSizeToMovePerIterationInGB, maxSizeEnteringTargetInGB, + maxSizeLeavingSourceInGB); running = containerBalancerClient.getContainerBalancerStatus(); assertTrue(running); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java index 55164688a16..e15d7b1f148 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java @@ -64,7 +64,7 @@ public static void setup() throws Exception { @AfterClass 
public static void cleanup() throws Exception { - if(cluster != null) { + if (cluster != null) { cluster.shutdown(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java index 027afa3965b..29832701f6f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java @@ -83,7 +83,7 @@ public static OzoneBucket createVolumeAndBucket(MiniOzoneCluster cluster, public static void createKey(OzoneBucket bucket, String keyName, String content) throws IOException { createKey(bucket, keyName, ReplicationFactor.ONE, - ReplicationType.STAND_ALONE, content); + ReplicationType.RATIS, content); } public static void createKey(OzoneBucket bucket, String keyName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index e7436e06e85..2b095144d8b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -119,7 +119,7 @@ public void testStartMultipleDatanodes() throws Exception { cluster.waitForClusterToBeReady(); List datanodes = cluster.getHddsDatanodes(); assertEquals(numberOfNodes, datanodes.size()); - for(HddsDatanodeService dn : datanodes) { + for (HddsDatanodeService dn : datanodes) { // Create a single member pipe line List dns = new ArrayList<>(); dns.add(dn.getDatanodeDetails()); @@ -132,7 +132,7 @@ public void testStartMultipleDatanodes() throws Exception { .build(); // Verify client is able to connect to the container - try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf)){ + try (XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf)) { client.connect(); assertTrue(client.isConnected(pipeline.getFirstNode())); } @@ -285,7 +285,7 @@ public void testContainerRandomPort() throws IOException { } private void createMalformedIDFile(File malformedFile) - throws IOException{ + throws IOException { malformedFile.delete(); DatanodeDetails id = randomDatanodeDetails(); ContainerUtils.writeDatanodeDetailsTo(id, malformedFile); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java index 61225a9cac6..44b635f36da 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneOMHACluster.java @@ -39,11 +39,11 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; /** - * This class tests MiniOzoneOMHAClusterImpl. + * This class tests MiniOzoneHAClusterImpl. 
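With MiniOzoneOMHAClusterImpl deleted, OM-HA tests go through the generic HA builder and cast the result, as the TestMiniOzoneOMHACluster hunk here does. A sketch of that usage (Ozone integration-test classpath assumed; builder calls beyond those visible in the hunks are assumptions, and the concrete values are illustrative):

```java
import java.util.UUID;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;

public final class OmHaClusterSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    MiniOzoneHAClusterImpl cluster =
        (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf)
            .setClusterId(UUID.randomUUID().toString())
            .setScmId(UUID.randomUUID().toString())
            .setOMServiceId("om-service-test1")
            .setNumOfOzoneManagers(3)
            .build();
    try {
      cluster.waitForClusterToBeReady();
      // exercise the OMs here
    } finally {
      // shutdown() now also releases any still-reserved ports (see the
      // MiniOzoneHAClusterImpl.shutdown() override added in this patch)
      cluster.shutdown();
    }
  }
}
```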
*/ public class TestMiniOzoneOMHACluster { - private MiniOzoneOMHAClusterImpl cluster = null; + private MiniOzoneHAClusterImpl cluster = null; private OzoneConfiguration conf; private String clusterId; private String scmId; @@ -71,7 +71,7 @@ public void init() throws Exception { conf.set(OzoneConfigKeys.OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2); - cluster = (MiniOzoneOMHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) .setClusterId(clusterId) .setScmId(scmId) .setOMServiceId(omServiceId) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 7b7694b1393..3269c394f7c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -110,7 +110,8 @@ private void addPropertiesNotInXml() { ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY, ReconServerConfigKeys.RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM, OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, - OMConfigKeys.OZONE_OM_HA_PREFIX + OMConfigKeys.OZONE_OM_HA_PREFIX, + OMConfigKeys.OZONE_OM_TRANSPORT_CLASS // TODO HDDS-2856 )); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 3805103ecf3..e74a98b1cc1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -38,7 +38,7 @@ import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmInfo; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.HASecurityUtils; import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; @@ -86,17 +86,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY; -import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*; import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_KEYTAB_FILE_KEY; import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_KERBEROS_PRINCIPAL_KEY; import static org.apache.hadoop.net.ServerSocketUtil.getPort; @@ -148,7 +138,7 @@ public final class TestSecureOzoneCluster { public Timeout timeout = Timeout.seconds(80); @Rule - public TemporaryFolder folder= new TemporaryFolder(); + public TemporaryFolder folder = new TemporaryFolder(); private MiniKdc miniKdc; private OzoneConfiguration conf; @@ -171,6 +161,7 @@ public void init() { try { conf = new OzoneConfiguration(); conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, "localhost"); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, false); conf.setInt(OZONE_SCM_CLIENT_PORT_KEY, getPort(OZONE_SCM_CLIENT_PORT_DEFAULT, 100)); @@ -283,7 +274,7 @@ private void setSecureConfig() throws IOException { public void testSecureScmStartupSuccess() throws Exception { initSCM(); - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); //Reads the SCM Info from SCM instance ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); assertEquals(clusterId, scmInfo.getClusterId()); @@ -294,7 +285,7 @@ public void testSecureScmStartupSuccess() throws Exception { public void testSCMSecurityProtocol() throws Exception { initSCM(); - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); //Reads the SCM Info from SCM instance try { scm.start(); @@ -337,7 +328,7 @@ public void testSCMSecurityProtocol() throws Exception { @Test public void testAdminAccessControlException() throws Exception { initSCM(); - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); //Reads the SCM Info from SCM instance try { scm.start(); @@ -400,7 +391,7 @@ public void testSecureScmStartupFailure() throws Exception { LambdaTestUtils.intercept(IOException.class, "Running in secure mode, but config doesn't have a keytab", - () -> TestUtils.getScmSimple(conf)); + () -> HddsTestUtils.getScmSimple(conf)); conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY, "scm/_HOST@EXAMPLE.com"); @@ -408,7 +399,7 @@ public void testSecureScmStartupFailure() throws Exception { "/etc/security/keytabs/scm.keytab"); testCommonKerberosFailures( - () -> TestUtils.getScmSimple(conf)); + () -> HddsTestUtils.getScmSimple(conf)); } @@ -437,7 +428,7 @@ private void testCommonKerberosFailures(Callable test) throws Exception { public void testSecureOMInitializationFailure() throws Exception { initSCM(); // Create a secure SCM instance as om client will connect to it - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); setupOm(conf); conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY, "non-existent-user@EXAMPLE.com"); @@ -451,7 +442,7 @@ public void testSecureOMInitializationFailure() throws Exception { public void testSecureOmInitializationSuccess() throws Exception { initSCM(); // Create a secure SCM instance as om client will connect to it - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger()); GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO); @@ -469,7 +460,7 @@ public void testSecureOmInitializationSuccess() throws Exception { public void testAccessControlExceptionOnClient() throws Exception { initSCM(); // Create a secure SCM instance 
as om client will connect to it - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger()); GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO); setupOm(conf); @@ -723,7 +714,7 @@ public void testSecureOmReInit() throws Exception { initSCM(); try { - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); scm.start(); conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, false); OMStorage omStore = new OMStorage(conf); @@ -769,7 +760,7 @@ public void testSecureOmInitSuccess() throws Exception { omLogs.clearOutput(); initSCM(); try { - scm = TestUtils.getScmSimple(conf); + scm = HddsTestUtils.getScmSimple(conf); scm.start(); OMStorage omStore = new OMStorage(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index badcc441d82..14aa85d0227 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -55,7 +55,7 @@ import java.util.Arrays; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.commons.io.FileUtils; @@ -369,6 +369,7 @@ public void testBlockDeletingThrottling() throws Exception { .setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); try { DeletedBlockLog delLog = cluster.getStorageContainerManager() @@ -477,6 +478,7 @@ private Map> createDeleteTXLog( @Test public void testSCMInitialization() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); final String path = GenericTestUtils.getTempPath( UUID.randomUUID().toString()); Path scmPath = Paths.get(path, "scm-meta"); @@ -517,6 +519,7 @@ public void testSCMInitializationWithHAEnabled() throws Exception { @Test public void testSCMReinitialization() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); final String path = GenericTestUtils.getTempPath( UUID.randomUUID().toString()); Path scmPath = Paths.get(path, "scm-meta"); @@ -541,6 +544,7 @@ public void testSCMReinitialization() throws Exception { @Test public void testSCMReinitializationWithHAUpgrade() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); final String path = GenericTestUtils.getTempPath( UUID.randomUUID().toString()); Path scmPath = Paths.get(path, "scm-meta"); @@ -646,7 +650,7 @@ public void testSCMInitializationFailure() exception.expect(SCMException.class); exception.expectMessage( "SCM not initialized due to storage config failure"); - TestUtils.getScmSimple(conf); + HddsTestUtils.getScmSimple(conf); } @Test @@ -664,7 +668,7 @@ public void testScmInfo() throws Exception { scmStore.setScmId(scmId); // writes the version file properties scmStore.initialize(); - StorageContainerManager scm = TestUtils.getScmSimple(conf); + StorageContainerManager scm = HddsTestUtils.getScmSimple(conf); //Reads the SCM 
Info from SCM instance ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); Assert.assertEquals(clusterId, scmInfo.getClusterId()); @@ -743,6 +747,7 @@ public void testCloseContainerCommandOnRestart() throws Exception { .setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); try { TestStorageContainerManagerHelper helper = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java index b37f78571ae..58021f30ebf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java @@ -177,12 +177,12 @@ public InitResponse init() throws CertificateException { } @Override - public String getSignatureAlgorithm(){ + public String getSignatureAlgorithm() { return securityConfig.getSignatureAlgo(); } @Override - public String getSecurityProvider(){ + public String getSecurityProvider() { return securityConfig.getProvider(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java index 12a5807c0e7..612d5aca452 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java @@ -49,12 +49,10 @@ HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys .HDDS_SCM_SAFEMODE_PIPELINE_CREATION; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys. - HDDS_SCM_WATCHER_TIMEOUT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*; + import org.junit.Rule; import org.junit.rules.Timeout; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys. - OZONE_SCM_STALENODE_INTERVAL; /** * Tests the validity BCSID of a container. @@ -94,6 +92,7 @@ public static void init() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, false); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).setHbInterval(200) .build(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java index e03c5a9f44c..82f23a6b114 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java @@ -314,7 +314,7 @@ public void testReleaseBuffersOnException() throws Exception { // is updated to the latest index in putBlock response. 
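The TestStorageContainerManager hunks above, and the TestBCSID and TestContainerStateMachine hunks that follow, apply the same two adjustments to single-SCM tests: the SCM HA enable flag (OZONE_SCM_HA_ENABLE_KEY) is pinned to false so they keep exercising the non-HA SCM path, and the test additionally waits for a factor-ONE pipeline before the first write. A sketch of that setup pattern (Ozone test classpath assumed; the datanode count and timeout are illustrative):

```java
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.MiniOzoneCluster;

public final class SingleScmTestSetupSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Stay on the legacy non-HA SCM code path for this test.
    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false);
    MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(1)
        .build();
    try {
      cluster.waitForClusterToBeReady();
      // With a single datanode, also wait for a RATIS factor-ONE pipeline
      // before writing any data.
      cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);
    } finally {
      cluster.shutdown();
    }
  }
}
```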
watcher.watchForCommit(replies.get(1).getLogIndex() + 100); Assert.fail("Expected exception not thrown"); - } catch(IOException ioe) { + } catch (IOException ioe) { // with retry count set to noRetry and a lower watch request // timeout, watch request will eventually // fail with TimeoutIOException from ratis client or the client diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index 48ac3cfe7bf..43b488d7558 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -50,8 +50,8 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*; + import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; import org.junit.After; import org.junit.Assert; @@ -101,6 +101,7 @@ public void setup() throws Exception { conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, false); OzoneManager.setTestSecureOmFlag(true); conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 4f7e183586a..78a73cff657 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -19,6 +19,7 @@ import java.io.File; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.nio.file.Path; import java.time.Duration; import java.util.ArrayList; @@ -29,9 +30,9 @@ import java.util.UUID; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; @@ -42,6 +43,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.OzoneClientConfig; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; @@ -55,20 +57,23 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import 
org.apache.hadoop.ozone.client.io.KeyOutputStream; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.TestHelper; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; +import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine; import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; +import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; -import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.LambdaTestUtils; import static java.nio.charset.StandardCharsets.UTF_8; @@ -93,7 +98,6 @@ import static org.junit.Assert.fail; import org.junit.BeforeClass; import org.junit.Test; -import org.junit.jupiter.api.BeforeEach; /** * Tests the containerStateMachine failure handling. @@ -132,11 +136,12 @@ public static void init() throws Exception { conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS); conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1, TimeUnit.SECONDS); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); RatisClientConfig ratisClientConfig = conf.getObject(RatisClientConfig.class); - ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(10)); - ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(10)); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(20)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(20)); conf.setFromObject(ratisClientConfig); DatanodeRatisServerConfig ratisServerConfig = @@ -148,7 +153,7 @@ public static void init() throws Exception { RatisClientConfig.RaftConfig raftClientConfig = conf.getObject(RatisClientConfig.RaftConfig.class); raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); - raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(20)); conf.setFromObject(raftClientConfig); conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1); @@ -169,16 +174,6 @@ public static void init() throws Exception { random = new Random(); } - @BeforeEach - public void restartDatanode() - throws InterruptedException, TimeoutException, AuthenticationException, - IOException { - for (int i=0; i < cluster.getHddsDatanodes().size(); i++) { - cluster.restartHddsDatanode(i, true); - } - cluster.restartStorageContainerManager(true); - } - /** * Shutdown MiniDFSCluster. 
*/ @@ -677,4 +672,138 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() r2.run(); } + + @Test + public void testContainerStateMachineSingleFailureRetry() + throws Exception { + OzoneOutputStream key = + objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("ratis1", 1024, ReplicationType.RATIS, + ReplicationFactor.THREE, new HashMap<>()); + + key.write("ratis".getBytes(UTF_8)); + key.flush(); + key.write("ratis".getBytes(UTF_8)); + key.write("ratis".getBytes(UTF_8)); + + KeyOutputStream groupOutputStream = (KeyOutputStream) key. + getOutputStream(); + List locationInfoList = + groupOutputStream.getLocationInfoList(); + Assert.assertEquals(1, locationInfoList.size()); + + OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); + + induceFollowerFailure(omKeyLocationInfo, 2); + + try { + key.flush(); + key.write("ratis".getBytes(UTF_8)); + key.flush(); + key.close(); + } catch (Exception ioe) { + // Should not fail.. + Assert.fail("Exception " + ioe.getMessage()); + } + validateData("ratis1", 2, "ratisratisratisratis"); + } + + @Test + public void testContainerStateMachineDualFailureRetry() + throws Exception { + OzoneOutputStream key = + objectStore.getVolume(volumeName).getBucket(bucketName) + .createKey("ratis2", 1024, ReplicationType.RATIS, + ReplicationFactor.THREE, new HashMap<>()); + + key.write("ratis".getBytes(UTF_8)); + key.flush(); + key.write("ratis".getBytes(UTF_8)); + key.write("ratis".getBytes(UTF_8)); + + KeyOutputStream groupOutputStream = (KeyOutputStream) key. + getOutputStream(); + List locationInfoList = + groupOutputStream.getLocationInfoList(); + Assert.assertEquals(1, locationInfoList.size()); + + OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); + + induceFollowerFailure(omKeyLocationInfo, 1); + + try { + key.flush(); + key.write("ratis".getBytes(UTF_8)); + key.flush(); + key.close(); + } catch (Exception ioe) { + // Should not fail.. 
+ Assert.fail("Exception " + ioe.getMessage()); + } + validateData("ratis1", 2, "ratisratisratisratis"); + } + + private void induceFollowerFailure(OmKeyLocationInfo omKeyLocationInfo, + int failureCount) { + UUID leader = omKeyLocationInfo.getPipeline().getLeaderId(); + Set datanodeSet = + TestHelper.getDatanodeServices(cluster, + omKeyLocationInfo.getPipeline()); + int count = 0; + for (HddsDatanodeService dn : datanodeSet) { + UUID dnUuid = dn.getDatanodeDetails().getUuid(); + if (!dnUuid.equals(leader)) { + count++; + long containerID = omKeyLocationInfo.getContainerID(); + Container container = dn + .getDatanodeStateMachine() + .getContainer() + .getContainerSet() + .getContainer(containerID); + if (container != null) { + ContainerData containerData = + container + .getContainerData(); + Assert.assertTrue(containerData instanceof KeyValueContainerData); + KeyValueContainerData keyValueContainerData = + (KeyValueContainerData) containerData; + FileUtil.fullyDelete(new File(keyValueContainerData.getChunksPath())); + } + + if (count == failureCount) { + break; + } + } + } + } + + private void validateData(String key, int locationCount, String payload) { + OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(key) + .setRefreshPipeline(true) + .build(); + OmKeyInfo keyInfo = null; + try { + keyInfo = cluster.getOzoneManager().lookupKey(omKeyArgs); + + Assert.assertEquals(locationCount, + keyInfo.getLatestVersionLocations().getLocationListCount()); + OzoneInputStream + o = objectStore + .getVolume(volumeName) + .getBucket(bucketName) + .readKey(key); + byte[] buffer = new byte[1024]; + o.read(buffer, 0, 1024); + int end = ArrayUtils.indexOf(buffer, (byte) 0); + String response = new String(buffer, 0, + end, + StandardCharsets.UTF_8); + Assert.assertEquals(payload, response); + } catch (IOException e) { + Assert.fail("Exception not expected " + e.getMessage()); + } + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index c7a28db85b0..caf3ae09293 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -47,8 +47,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.*; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*; /** * Tests the containerStateMachine failure handling by set flush delay. 
@@ -92,6 +91,7 @@ public void setup() throws Exception { baseDir.mkdirs(); conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, false); // conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java index cb42ceb969c..a373de10ded 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java @@ -72,6 +72,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import org.junit.AfterClass; import org.junit.Assert; +import org.junit.Assume; import org.junit.BeforeClass; import org.junit.Test; @@ -212,7 +213,8 @@ public void testDeleteKeyWithSlowFollower() throws Exception { KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); - Assert.assertEquals(1, locationInfoList.size()); + Assume.assumeTrue("Expected exactly a single location, but got: " + + locationInfoList.size(), 1 == locationInfoList.size()); OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0); long containerID = omKeyLocationInfo.getContainerID(); // A container is created on the datanode. Now figure out a follower node to @@ -224,7 +226,7 @@ public void testDeleteKeyWithSlowFollower() throws Exception { cluster.getStorageContainerManager().getPipelineManager() .getPipelines(new RatisReplicationConfig( HddsProtos.ReplicationFactor.THREE)); - Assert.assertTrue(pipelineList.size() >= FACTOR_THREE_PIPELINE_COUNT); + Assume.assumeTrue(pipelineList.size() >= FACTOR_THREE_PIPELINE_COUNT); Pipeline pipeline = pipelineList.get(0); for (HddsDatanodeService dn : cluster.getHddsDatanodes()) { if (RatisTestHelper.isRatisFollower(dn, pipeline)) { @@ -233,10 +235,9 @@ public void testDeleteKeyWithSlowFollower() throws Exception { leader = dn; } } - Assert.assertNotNull(follower); - Assert.assertNotNull(leader); + Assume.assumeNotNull(follower, leader); //ensure that the chosen follower is still a follower - Assert.assertTrue(RatisTestHelper.isRatisFollower(follower, pipeline)); + Assume.assumeTrue(RatisTestHelper.isRatisFollower(follower, pipeline)); // shutdown the follower node cluster.shutdownHddsDatanode(follower.getDatanodeDetails()); key.write(testData); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java index ac62bc0a1a1..b16b82473da 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java @@ -59,7 +59,7 @@ /** * Tests Close Container Exception handling by Ozone Client. */ -public class TestDiscardPreallocatedBlocks{ +public class TestDiscardPreallocatedBlocks { /** * Set a timeout for each test. 
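The TestDeleteWithSlowFollower hunk above converts environmental preconditions (pipeline count, identified leader and follower) from hard assertions into JUnit assumptions, so an unexpected cluster shape skips the test instead of failing it. A small self-contained sketch of that pattern (JUnit 4, as used by the surrounding tests; the helper method is a stand-in):

```java
import org.junit.Assume;
import org.junit.Test;

public class PreconditionAsAssumptionExample {
  @Test
  public void skipsInsteadOfFailingWhenClusterShapeIsUnexpected() {
    int pipelineCount = countFactorThreePipelines();
    // assumeTrue() marks the test as skipped (not failed) when the cluster
    // did not come up in the shape the test needs.
    Assume.assumeTrue("need at least one THREE-factor pipeline",
        pipelineCount >= 1);
    // ... the real assertions would follow here ...
  }

  private int countFactorThreePipelines() {
    return 1; // stand-in for querying the mini cluster's pipeline manager
  }
}
```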
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index e1358eb83d1..155f785c2aa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -212,7 +212,7 @@ public void testWriteSmallFile() throws Exception { OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); String data = ContainerTestHelper - .getFixedLengthString(keyString, chunkSize/2); + .getFixedLengthString(keyString, chunkSize / 2); key.write(data.getBytes(UTF_8)); // get the name of a valid container Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index d5e60d2fd73..4ebcc8745c0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -74,7 +74,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; +import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import org.junit.AfterClass; import org.junit.Assert; @@ -178,7 +178,7 @@ public static void init() throws Exception { */ @AfterClass public static void shutdown() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } @@ -235,7 +235,7 @@ private void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { String value = "sample value"; try (OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(StandardCharsets.UTF_8).length, - ReplicationType.STAND_ALONE, + ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) { out.write(value.getBytes(StandardCharsets.UTF_8)); } @@ -251,7 +251,7 @@ private void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { byte[] fileContent; int len = 0; - try(OzoneInputStream is = bucket.readKey(keyName)) { + try (OzoneInputStream is = bucket.readKey(keyName)) { fileContent = new byte[value.getBytes(StandardCharsets.UTF_8).length]; len = is.read(fileContent); } @@ -259,7 +259,7 @@ private void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { Assert.assertEquals(len, value.length()); Assert.assertTrue(verifyRatisReplication(bucket.getVolumeName(), - bucket.getName(), keyName, ReplicationType.STAND_ALONE, + bucket.getName(), keyName, ReplicationType.RATIS, ReplicationFactor.ONE)); Assert.assertEquals(value, new String(fileContent, StandardCharsets.UTF_8)); Assert.assertFalse(key.getCreationTime().isBefore(testStartTime)); @@ -267,7 +267,7 @@ private void createAndVerifyKeyData(OzoneBucket bucket) throws Exception { } private OzoneBucket createVolumeAndBucket(String volumeName, - String bucketName) throws Exception{ + String bucketName) throws Exception { store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); BucketArgs bucketArgs = 
BucketArgs.newBuilder() @@ -323,7 +323,7 @@ public void testKeyWithEncryptionAndGdpr() throws Exception { keyMetadata.put(OzoneConsts.GDPR_FLAG, "true"); try (OzoneOutputStream out = bucket.createKey(keyName, value.getBytes(StandardCharsets.UTF_8).length, - ReplicationType.STAND_ALONE, + ReplicationType.RATIS, ReplicationFactor.ONE, keyMetadata)) { out.write(value.getBytes(StandardCharsets.UTF_8)); } @@ -333,14 +333,14 @@ public void testKeyWithEncryptionAndGdpr() throws Exception { byte[] fileContent; int len = 0; - try(OzoneInputStream is = bucket.readKey(keyName)) { + try (OzoneInputStream is = bucket.readKey(keyName)) { fileContent = new byte[value.getBytes(StandardCharsets.UTF_8).length]; len = is.read(fileContent); } Assert.assertEquals(len, value.length()); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, ReplicationType.STAND_ALONE, + keyName, ReplicationType.RATIS, ReplicationFactor.ONE)); Assert.assertEquals(value, new String(fileContent, StandardCharsets.UTF_8)); Assert.assertFalse(key.getCreationTime().isBefore(testStartTime)); @@ -463,7 +463,7 @@ public void testMultipartUploadWithEncryption(OzoneBucket bucket, String keyName = "mpu_test_key_" + numParts; // Initiate multipart upload - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); // Upload Parts @@ -498,7 +498,7 @@ public void testMultipartUploadWithEncryption(OzoneBucket bucket, // Read different data lengths and starting from different offsets and // verify the data matches. Random random = new Random(); - int randomSize = random.nextInt(keySize/2); + int randomSize = random.nextInt(keySize / 2); int randomOffset = random.nextInt(keySize - randomSize); int[] readDataSizes = {keySize, keySize / 3 + 1, BLOCK_SIZE, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index d772a3f20f8..9ea04d453fa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -50,7 +50,7 @@ import static org.apache.hadoop.hdds.StringUtils.string2Bytes; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.ozone.test.GenericTestUtils; @@ -74,7 +74,7 @@ import java.util.UUID; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; +import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -107,7 +107,7 @@ public class TestOzoneClientMultipartUploadWithFSO { @BeforeClass public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - TestOMRequestUtils.configureFSOptimizedPaths(conf, true); + 
OMRequestTestUtils.configureFSOptimizedPaths(conf, true); startCluster(conf); } @@ -140,7 +140,7 @@ static void startCluster(OzoneConfiguration conf) throws Exception { * Close OzoneClient and shutdown MiniOzoneCluster. */ static void shutdownCluster() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } @@ -161,7 +161,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); Assert.assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -173,7 +173,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws // Call initiate multipart upload for the same key again, this should // generate a new uploadID. multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); Assert.assertNotNull(multipartInfo); Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); @@ -227,7 +227,7 @@ public void testUploadPartWithNoOverride() throws IOException { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); Assert.assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -321,7 +321,7 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); // Initiate multipart upload - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); // Upload Parts @@ -352,7 +352,7 @@ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); // We have not uploaded any parts, but passing some list it should throw @@ -376,7 +376,7 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -400,7 +400,7 @@ public void testMultipartUploadWithMissingParts() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -426,7 +426,7 @@ public void testCommitPartAfterCompleteUpload() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); Assert.assertNotNull(omMultipartInfo.getUploadID()); @@ -505,7 +505,7 @@ public void testAbortUploadFailWithInProgressPartUpload() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); 
Assert.assertNotNull(omMultipartInfo.getUploadID()); @@ -540,7 +540,7 @@ public void testAbortUploadSuccessWithOutAnyParts() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); bucket.abortMultipartUpload(keyName, uploadID); } @@ -563,7 +563,7 @@ public void testAbortUploadSuccessWithParts() throws Exception { ozoneManager.getMetadataManager().getBucketTable().get(buckKey); BucketLayout bucketLayout = buckInfo.getBucketLayout(); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); String partName = uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -603,24 +603,24 @@ public void testListMultipartUploadParts() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); String partName1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); - String partName2 =uploadPart(bucket, keyName, uploadID, 2, + String partName2 = uploadPart(bucket, keyName, uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(2, partName2); - String partName3 =uploadPart(bucket, keyName, uploadID, 3, + String partName3 = uploadPart(bucket, keyName, uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(3, partName3); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); - Assert.assertEquals(STAND_ALONE, + Assert.assertEquals(RATIS, ozoneMultipartUploadPartListParts.getReplicationType()); Assert.assertEquals(3, ozoneMultipartUploadPartListParts.getPartInfoList().size()); @@ -705,24 +705,24 @@ public void testListMultipartUploadPartsWithContinuation() OzoneBucket bucket = volume.getBucket(bucketName); Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); String partName1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); - String partName2 =uploadPart(bucket, keyName, uploadID, 2, + String partName2 = uploadPart(bucket, keyName, uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(2, partName2); - String partName3 =uploadPart(bucket, keyName, uploadID, 3, + String partName3 = uploadPart(bucket, keyName, uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(3, partName3); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); - Assert.assertEquals(STAND_ALONE, + Assert.assertEquals(RATIS, ozoneMultipartUploadPartListParts.getReplicationType()); Assert.assertEquals(2, @@ -808,7 +808,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, 
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); @@ -821,7 +821,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() Assert.assertEquals(0, ozoneMultipartUploadPartListParts.getPartInfoList().size()); - Assert.assertEquals(STAND_ALONE, + Assert.assertEquals(RATIS, ozoneMultipartUploadPartListParts.getReplicationType()); // As we don't have any parts with greater than partNumberMarker and list @@ -866,11 +866,11 @@ public void testListMultipartUpload() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); // Initiate multipart upload - String uploadID1 = initiateMultipartUpload(bucket, key1, STAND_ALONE, + String uploadID1 = initiateMultipartUpload(bucket, key1, RATIS, ONE); - String uploadID2 = initiateMultipartUpload(bucket, key2, STAND_ALONE, + String uploadID2 = initiateMultipartUpload(bucket, key2, RATIS, ONE); - String uploadID3 = initiateMultipartUpload(bucket, key3, STAND_ALONE, + String uploadID3 = initiateMultipartUpload(bucket, key3, RATIS, ONE); // Upload Parts diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java index 3cda449c57d..ea992f51982 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java @@ -191,8 +191,8 @@ public void testGroupMismatchExceptionHandling() throws Exception { @Test public void testMaxRetriesByOzoneClient() throws Exception { String keyName = getKeyName(); - OzoneOutputStream key = - createKey(keyName, ReplicationType.RATIS, (MAX_RETRIES+1) * blockSize); + OzoneOutputStream key = createKey( + keyName, ReplicationType.RATIS, (MAX_RETRIES + 1) * blockSize); Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = (KeyOutputStream) key.getOutputStream(); List entries = keyOutputStream.getStreamEntries(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 222e352154e..128c407b248 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -116,8 +116,7 @@ import static org.apache.hadoop.hdds.StringUtils.string2Bytes; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; +import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT; @@ -204,7 +203,7 @@ static void startCluster(OzoneConfiguration conf) throws Exception { * Close OzoneClient and shutdown MiniOzoneCluster. 
*/ static void shutdownCluster() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } @@ -225,7 +224,7 @@ public static void setOzClient(OzoneClient ozClient) { TestOzoneRpcClientAbstract.ozClient = ozClient; } - public static void setOzoneManager(OzoneManager ozoneManager){ + public static void setOzoneManager(OzoneManager ozoneManager) { TestOzoneRpcClientAbstract.ozoneManager = ozoneManager; } @@ -297,6 +296,28 @@ public void testVolumeSetOwner() throws IOException { proxy.setVolumeOwner(volumeName, ownerName); } + @Test + public void testBucketSetOwner() throws IOException { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + store.createVolume(volumeName); + store.getVolume(volumeName).createBucket(bucketName); + + String oldOwner = store.getVolume(volumeName).getBucket(bucketName) + .getOwner(); + String ownerName = "testUser"; + + ClientProtocol proxy = store.getClientProxy(); + proxy.setBucketOwner(volumeName, bucketName, ownerName); + String newOwner = store.getVolume(volumeName).getBucket(bucketName) + .getOwner(); + + assertEquals(ownerName, newOwner); + assertNotEquals(oldOwner, newOwner); + store.getVolume(volumeName).deleteBucket(bucketName); + store.deleteVolume(volumeName); + } + @Test public void testSetAndClrQuota() throws Exception { String volumeName = UUID.randomUUID().toString(); @@ -838,7 +859,7 @@ public void testPutKey() throws IOException { String keyName = UUID.randomUUID().toString(); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes(UTF_8).length, STAND_ALONE, + value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>()); out.write(value.getBytes(UTF_8)); out.close(); @@ -848,7 +869,7 @@ public void testPutKey() throws IOException { byte[] fileContent = new byte[value.getBytes(UTF_8).length]; is.read(fileContent); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, STAND_ALONE, + keyName, RATIS, ONE)); Assert.assertEquals(value, new String(fileContent, UTF_8)); Assert.assertFalse(key.getCreationTime().isBefore(testStartTime)); @@ -912,7 +933,7 @@ public void testCheckUsedBytesQuota() throws IOException { try { OzoneOutputStream out = bucket.createKey(UUID.randomUUID().toString(), - valueLength, STAND_ALONE, ONE, new HashMap<>()); + valueLength, RATIS, ONE, new HashMap<>()); for (int i = 0; i <= (4 * blockSize) / value.length(); i++) { out.write(value.getBytes(UTF_8)); } @@ -937,7 +958,7 @@ public void testCheckUsedBytesQuota() throws IOException { bucket.setQuota(OzoneQuota.parseQuota( 5 * blockSize + " B", "100")); OzoneOutputStream out = bucket.createKey(UUID.randomUUID().toString(), - valueLength, STAND_ALONE, ONE, new HashMap<>()); + valueLength, RATIS, ONE, new HashMap<>()); out.close(); Assert.assertEquals(4 * blockSize, store.getVolume(volumeName).getBucket(bucketName).getUsedBytes()); @@ -1070,8 +1091,8 @@ public void testBucketUsedNamespace() throws IOException { private void writeKey(OzoneBucket bucket, String keyName, ReplicationFactor replication, String value, int valueLength) - throws IOException{ - OzoneOutputStream out = bucket.createKey(keyName, valueLength, STAND_ALONE, + throws IOException { + OzoneOutputStream out = bucket.createKey(keyName, valueLength, RATIS, replication, new HashMap<>()); out.write(value.getBytes(UTF_8)); out.close(); @@ -1079,8 +1100,8 @@ private void writeKey(OzoneBucket bucket, String keyName, private void writeFile(OzoneBucket bucket, String keyName, ReplicationFactor replication, String 
value, int valueLength) - throws IOException{ - OzoneOutputStream out = bucket.createFile(keyName, valueLength, STAND_ALONE, + throws IOException { + OzoneOutputStream out = bucket.createFile(keyName, valueLength, RATIS, replication, true, true); out.write(value.getBytes(UTF_8)); out.close(); @@ -1102,7 +1123,7 @@ public void testUsedBytesWithUploadPart() throws IOException { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -1140,7 +1161,7 @@ public void testValidateBlockLengthWithCommitKey() throws IOException { // create the initial key with size 0, write will allocate the first block. OzoneOutputStream out = bucket.createKey(keyName, 0, - STAND_ALONE, ONE, new HashMap<>()); + RATIS, ONE, new HashMap<>()); out.write(value.getBytes(UTF_8)); out.close(); OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); @@ -1406,7 +1427,7 @@ public void testGetKeyDetails() throws IOException { //String keyValue = "this is a test value.glx"; // create the initial key with size 0, write will allocate the first block. OzoneOutputStream out = bucket.createKey(keyName, - keyValue.getBytes(UTF_8).length, STAND_ALONE, + keyValue.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>()); out.write(keyValue.getBytes(UTF_8)); out.close(); @@ -1758,7 +1779,7 @@ public void testDeleteKey() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes(UTF_8).length, STAND_ALONE, + value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>()); out.write(value.getBytes(UTF_8)); out.close(); @@ -1880,33 +1901,33 @@ public void testListVolume() throws IOException { String volBase = "vol-list-"; //Create 10 volume vol-list-a-0- to vol-list-a-9- String volBaseNameA = volBase + "a-"; - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { store.createVolume( volBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5)); } //Create 10 volume vol-list-b-0- to vol-list-b-9- String volBaseNameB = volBase + "b-"; - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { store.createVolume( volBaseNameB + i + "-" + RandomStringUtils.randomNumeric(5)); } Iterator volIterator = store.listVolumes(volBase); int totalVolumeCount = 0; - while(volIterator.hasNext()) { + while (volIterator.hasNext()) { volIterator.next(); totalVolumeCount++; } Assert.assertEquals(20, totalVolumeCount); Iterator volAIterator = store.listVolumes( volBaseNameA); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { Assert.assertTrue(volAIterator.next().getName() .startsWith(volBaseNameA + i + "-")); } Assert.assertFalse(volAIterator.hasNext()); Iterator volBIterator = store.listVolumes( volBaseNameB); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { Assert.assertTrue(volBIterator.next().getName() .startsWith(volBaseNameB + i + "-")); } @@ -1929,7 +1950,7 @@ public void testListBucket() //Create 10 buckets in vol-a- and 10 in vol-b- String bucketBaseNameA = "bucket-a-"; - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { volA.createBucket( bucketBaseNameA + i + "-" + RandomStringUtils.randomNumeric(5)); volB.createBucket( @@ -1937,7 +1958,7 @@ public void testListBucket() } //Create 10 buckets in vol-a- and 10 in vol-b- String bucketBaseNameB = "bucket-b-"; - for(int i = 0; i < 10; i++) { + for (int i = 
0; i < 10; i++) { volA.createBucket( bucketBaseNameB + i + "-" + RandomStringUtils.randomNumeric(5)); volB.createBucket( @@ -1946,7 +1967,7 @@ public void testListBucket() Iterator volABucketIter = volA.listBuckets("bucket-"); int volABucketCount = 0; - while(volABucketIter.hasNext()) { + while (volABucketIter.hasNext()) { volABucketIter.next(); volABucketCount++; } @@ -1954,7 +1975,7 @@ public void testListBucket() Iterator volBBucketIter = volA.listBuckets("bucket-"); int volBBucketCount = 0; - while(volBBucketIter.hasNext()) { + while (volBBucketIter.hasNext()) { volBBucketIter.next(); volBBucketCount++; } @@ -1963,7 +1984,7 @@ public void testListBucket() Iterator volABucketAIter = volA.listBuckets("bucket-a-"); int volABucketACount = 0; - while(volABucketAIter.hasNext()) { + while (volABucketAIter.hasNext()) { volABucketAIter.next(); volABucketACount++; } @@ -1971,21 +1992,21 @@ public void testListBucket() Iterator volBBucketBIter = volA.listBuckets("bucket-b-"); int volBBucketBCount = 0; - while(volBBucketBIter.hasNext()) { + while (volBBucketBIter.hasNext()) { volBBucketBIter.next(); volBBucketBCount++; } Assert.assertEquals(10, volBBucketBCount); Iterator volABucketBIter = volA.listBuckets( "bucket-b-"); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { Assert.assertTrue(volABucketBIter.next().getName() .startsWith(bucketBaseNameB + i + "-")); } Assert.assertFalse(volABucketBIter.hasNext()); Iterator volBBucketAIter = volB.listBuckets( "bucket-a-"); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { Assert.assertTrue(volBBucketAIter.next().getName() .startsWith(bucketBaseNameA + i + "-")); } @@ -2000,7 +2021,7 @@ public void testListBucketsOnEmptyVolume() store.createVolume(volume); OzoneVolume vol = store.getVolume(volume); Iterator buckets = vol.listBuckets(""); - while(buckets.hasNext()) { + while (buckets.hasNext()) { fail(); } } @@ -2035,25 +2056,25 @@ public void testListKey() byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8); OzoneOutputStream one = volAbucketA.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, + value.length, RATIS, ONE, new HashMap<>()); one.write(value); one.close(); OzoneOutputStream two = volAbucketB.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, + value.length, RATIS, ONE, new HashMap<>()); two.write(value); two.close(); OzoneOutputStream three = volBbucketA.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, + value.length, RATIS, ONE, new HashMap<>()); three.write(value); three.close(); OzoneOutputStream four = volBbucketB.createKey( keyBaseA + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, + value.length, RATIS, ONE, new HashMap<>()); four.write(value); four.close(); @@ -2068,25 +2089,25 @@ public void testListKey() byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8); OzoneOutputStream one = volAbucketA.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, + value.length, RATIS, ONE, new HashMap<>()); one.write(value); one.close(); OzoneOutputStream two = volAbucketB.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, + value.length, RATIS, ONE, new HashMap<>()); two.write(value); two.close(); OzoneOutputStream three = volBbucketA.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - 
value.length, STAND_ALONE, ONE, + value.length, RATIS, ONE, new HashMap<>()); three.write(value); three.close(); OzoneOutputStream four = volBbucketB.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), - value.length, STAND_ALONE, ONE, + value.length, RATIS, ONE, new HashMap<>()); four.write(value); four.close(); @@ -2094,7 +2115,7 @@ public void testListKey() Iterator volABucketAIter = volAbucketA.listKeys("key-"); int volABucketAKeyCount = 0; - while(volABucketAIter.hasNext()) { + while (volABucketAIter.hasNext()) { volABucketAIter.next(); volABucketAKeyCount++; } @@ -2102,7 +2123,7 @@ public void testListKey() Iterator volABucketBIter = volAbucketB.listKeys("key-"); int volABucketBKeyCount = 0; - while(volABucketBIter.hasNext()) { + while (volABucketBIter.hasNext()) { volABucketBIter.next(); volABucketBKeyCount++; } @@ -2110,7 +2131,7 @@ public void testListKey() Iterator volBBucketAIter = volBbucketA.listKeys("key-"); int volBBucketAKeyCount = 0; - while(volBBucketAIter.hasNext()) { + while (volBBucketAIter.hasNext()) { volBBucketAIter.next(); volBBucketAKeyCount++; } @@ -2118,7 +2139,7 @@ public void testListKey() Iterator volBBucketBIter = volBbucketB.listKeys("key-"); int volBBucketBKeyCount = 0; - while(volBBucketBIter.hasNext()) { + while (volBBucketBIter.hasNext()) { volBBucketBIter.next(); volBBucketBKeyCount++; } @@ -2126,14 +2147,14 @@ public void testListKey() Iterator volABucketAKeyAIter = volAbucketA.listKeys("key-a-"); int volABucketAKeyACount = 0; - while(volABucketAKeyAIter.hasNext()) { + while (volABucketAKeyAIter.hasNext()) { volABucketAKeyAIter.next(); volABucketAKeyACount++; } Assert.assertEquals(10, volABucketAKeyACount); Iterator volABucketAKeyBIter = volAbucketA.listKeys("key-b-"); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { Assert.assertTrue(volABucketAKeyBIter.next().getName() .startsWith("key-b-" + i + "-")); } @@ -2150,7 +2171,7 @@ public void testListKeyOnEmptyBucket() vol.createBucket(bucket); OzoneBucket buc = vol.getBucket(bucket); Iterator keys = buc.listKeys(""); - while(keys.hasNext()) { + while (keys.hasNext()) { fail(); } } @@ -2167,7 +2188,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -2179,7 +2200,7 @@ public void testInitiateMultipartUploadWithReplicationInformationSet() throws // Call initiate multipart upload for the same key again, this should // generate a new uploadID. 
multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); assertNotNull(multipartInfo); Assert.assertEquals(volumeName, multipartInfo.getVolumeName()); @@ -2234,7 +2255,7 @@ public void testUploadPartWithNoOverride() throws IOException { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -2271,7 +2292,7 @@ public void testUploadPartOverrideWithStandAlone() throws IOException { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -2540,7 +2561,7 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); // Initiate multipart upload - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); // Upload Parts @@ -2573,7 +2594,7 @@ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); // We have not uploaded any parts, but passing some list it should throw @@ -2598,7 +2619,7 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -2623,7 +2644,7 @@ public void testMultipartUploadWithMissingParts() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -2663,7 +2684,7 @@ public void testAbortUploadFailWithInProgressPartUpload() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); Assert.assertNotNull(omMultipartInfo.getUploadID()); @@ -2698,7 +2719,7 @@ public void testCommitPartAfterCompleteUpload() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo omMultipartInfo = bucket.initiateMultipartUpload(keyName, - STAND_ALONE, ONE); + RATIS, ONE); Assert.assertNotNull(omMultipartInfo.getUploadID()); @@ -2760,7 +2781,7 @@ public void testAbortUploadSuccessWithOutAnyParts() throws Exception { volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); bucket.abortMultipartUpload(keyName, uploadID); } @@ -2776,7 +2797,7 @@ public void testAbortUploadSuccessWithParts() throws Exception { volume.createBucket(bucketName); OzoneBucket 
bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); bucket.abortMultipartUpload(keyName, uploadID); @@ -2794,24 +2815,24 @@ public void testListMultipartUploadParts() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); String partName1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); - String partName2 =uploadPart(bucket, keyName, uploadID, 2, + String partName2 = uploadPart(bucket, keyName, uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(2, partName2); - String partName3 =uploadPart(bucket, keyName, uploadID, 3, + String partName3 = uploadPart(bucket, keyName, uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(3, partName3); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 3); - Assert.assertEquals(STAND_ALONE, + Assert.assertEquals(RATIS, ozoneMultipartUploadPartListParts.getReplicationType()); Assert.assertEquals(3, ozoneMultipartUploadPartListParts.getPartInfoList().size()); @@ -2845,24 +2866,24 @@ public void testListMultipartUploadPartsWithContinuation() OzoneBucket bucket = volume.getBucket(bucketName); Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); String partName1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(1, partName1); - String partName2 =uploadPart(bucket, keyName, uploadID, 2, + String partName2 = uploadPart(bucket, keyName, uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(2, partName2); - String partName3 =uploadPart(bucket, keyName, uploadID, 3, + String partName3 = uploadPart(bucket, keyName, uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); partsMap.put(3, partName3); OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = bucket.listParts(keyName, uploadID, 0, 2); - Assert.assertEquals(STAND_ALONE, + Assert.assertEquals(RATIS, ozoneMultipartUploadPartListParts.getReplicationType()); Assert.assertEquals(2, @@ -2950,7 +2971,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() OzoneBucket bucket = volume.getBucket(bucketName); - String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE, + String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); @@ -2963,7 +2984,7 @@ public void testListPartsWithPartMarkerGreaterThanPartCount() Assert.assertEquals(0, ozoneMultipartUploadPartListParts.getPartInfoList().size()); - Assert.assertEquals(STAND_ALONE, + Assert.assertEquals(RATIS, ozoneMultipartUploadPartListParts.getReplicationType()); // As we don't have any parts with greater than partNumberMarker and list @@ -3240,7 +3261,7 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { List expectedAcls = getAclList(new OzoneConfiguration()); // 
Case:1 Add new acl permission to existing acl. - if(expectedAcls.size()>0) { + if (expectedAcls.size() > 0) { OzoneAcl oldAcl = expectedAcls.get(0); OzoneAcl newAcl = new OzoneAcl(oldAcl.getType(), oldAcl.getName(), ACLType.READ_ACL, oldAcl.getAclScope()); @@ -3309,7 +3330,7 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { } private void writeKey(String key1, OzoneBucket bucket) throws IOException { - OzoneOutputStream out = bucket.createKey(key1, 1024, STAND_ALONE, + OzoneOutputStream out = bucket.createKey(key1, 1024, RATIS, ONE, new HashMap<>()); out.write(RandomStringUtils.random(1024).getBytes(UTF_8)); out.close(); @@ -3431,7 +3452,7 @@ private void completeMultipartUpload(OzoneBucket bucket, String keyName, private void createTestKey(OzoneBucket bucket, String keyName, String keyValue) throws IOException { OzoneOutputStream out = bucket.createKey(keyName, - keyValue.getBytes(UTF_8).length, STAND_ALONE, + keyValue.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>()); out.write(keyValue.getBytes(UTF_8)); out.close(); @@ -3490,7 +3511,7 @@ public void testKeyReadWriteForGDPR() throws Exception { Map keyMetadata = new HashMap<>(); keyMetadata.put(OzoneConsts.GDPR_FLAG, "true"); OzoneOutputStream out = bucket.createKey(keyName, - text.getBytes(UTF_8).length, STAND_ALONE, ONE, keyMetadata); + text.getBytes(UTF_8).length, RATIS, ONE, keyMetadata); out.write(text.getBytes(UTF_8)); out.close(); Assert.assertNull(keyMetadata.get(OzoneConsts.GDPR_SECRET)); @@ -3508,7 +3529,7 @@ public void testKeyReadWriteForGDPR() throws Exception { byte[] fileContent = new byte[text.getBytes(UTF_8).length]; is.read(fileContent); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, STAND_ALONE, + keyName, RATIS, ONE)); Assert.assertEquals(text, new String(fileContent, UTF_8)); @@ -3571,7 +3592,7 @@ public void testDeletedKeyForGDPR() throws Exception { Map keyMetadata = new HashMap<>(); keyMetadata.put(OzoneConsts.GDPR_FLAG, "true"); OzoneOutputStream out = bucket.createKey(keyName, - text.getBytes(UTF_8).length, STAND_ALONE, ONE, keyMetadata); + text.getBytes(UTF_8).length, RATIS, ONE, keyMetadata); out.write(text.getBytes(UTF_8)); out.close(); @@ -3588,7 +3609,7 @@ public void testDeletedKeyForGDPR() throws Exception { byte[] fileContent = new byte[text.getBytes(UTF_8).length]; is.read(fileContent); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, STAND_ALONE, + keyName, RATIS, ONE)); Assert.assertEquals(text, new String(fileContent, UTF_8)); @@ -3636,7 +3657,8 @@ public void testHeadObject() throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); ReplicationConfig replicationConfig = ReplicationConfig - .fromProtoTypeAndFactor(RATIS, HddsProtos.ReplicationFactor.THREE); + .fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); String value = "sample value"; store.createVolume(volumeName); @@ -3678,7 +3700,8 @@ private void createRequiredForVersioningTest(String volumeName, String bucketName, String keyName, boolean versioning) throws Exception { ReplicationConfig replicationConfig = ReplicationConfig - .fromProtoTypeAndFactor(RATIS, HddsProtos.ReplicationFactor.THREE); + .fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); String value = "sample value"; store.createVolume(volumeName); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java index 8ad62f222b9..c5e54db832d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientForAclAuditLog.java @@ -162,7 +162,7 @@ private static void emptyAuditLog() throws IOException { * Close OzoneClient and shutdown MiniOzoneCluster. */ private static void shutdownCluster() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } @@ -246,28 +246,28 @@ public void testXXXAclFailureAudits() throws Exception { // xxxAcl will fail as current ugi user doesn't have the required access // for volume - try{ + try { List acls = store.getAcl(volObj); } catch (Exception ex) { verifyLog(OMAction.GET_ACL.name(), volumeName, AuditEventStatus.FAILURE.name()); } - try{ + try { store.addAcl(volObj, USER_ACL); } catch (Exception ex) { verifyLog(OMAction.ADD_ACL.name(), volumeName, AuditEventStatus.FAILURE.name()); } - try{ + try { store.removeAcl(volObj, USER_ACL); } catch (Exception ex) { verifyLog(OMAction.REMOVE_ACL.name(), volumeName, AuditEventStatus.FAILURE.name()); } - try{ + try { store.setAcl(volObj, aclListToAdd); } catch (Exception ex) { verifyLog(OMAction.SET_ACL.name(), volumeName, "johndoe", "jane", @@ -282,16 +282,16 @@ private void verifyLog(String... expected) throws Exception { GenericTestUtils.waitFor(() -> (lines != null) ? true : false, 100, 60000); - try{ + try { // When log entry is expected, the log file will contain one line and // that must be equal to the expected string assertTrue(lines.size() != 0); - for(String exp: expected){ + for (String exp: expected) { assertTrue(lines.get(0).contains(exp)); } - } catch (AssertionError ex){ + } catch (AssertionError ex) { LOG.error("Error occurred in log verification", ex); - if(lines.size() != 0){ + if (lines.size() != 0) { LOG.error("Actual line ::: " + lines.get(0)); LOG.error("Expected tokens ::: " + Arrays.toString(expected)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java index 6cd85a1a5ba..5ac78b8f0f1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithKeyLatestVersion.java @@ -191,7 +191,7 @@ private void testReadKey(String volumeName, String bucketName, } private void testListStatus(String volumeName, String bucketName, - String keyName, boolean versioning) throws Exception{ + String keyName, boolean versioning) throws Exception { OzoneVolume volume = objectStore.getVolume(volumeName); OzoneBucket ozoneBucket = volume.getBucket(bucketName); List ozoneFileStatusList = ozoneBucket.listStatus(keyName, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java index 2e369b4d591..791a2267eac 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java @@ -55,7 +55,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.After; import org.junit.Assert; @@ -117,7 +117,7 @@ public static Collection data() { public void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); - TestOMRequestUtils.configureFSOptimizedPaths(conf, + OMRequestTestUtils.configureFSOptimizedPaths(conf, true, BucketLayout.fromString(bucketLayout)); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) @@ -139,7 +139,7 @@ public void init() throws Exception { */ @After public void shutdown() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index b44afe37d7e..5abc09ec0f9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -119,7 +119,7 @@ public static void init() throws Exception { .setCertificateClient(certificateClientTest) .build(); secretManager = new OzoneBlockTokenSecretManager(new SecurityConfig(conf), - 60 *60, certificateClientTest.getCertificate(). + 60 * 60, certificateClientTest.getCertificate(). 
getSerialNumber().toString()); secretManager.start(certificateClientTest); cluster.getOzoneManager().startSecretManager(); @@ -160,7 +160,7 @@ public void testPutKeySuccessWithBlockToken() throws Exception { String keyName = UUID.randomUUID().toString(); try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE, + value.getBytes(UTF_8).length, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) { out.write(value.getBytes(UTF_8)); } @@ -168,13 +168,13 @@ public void testPutKeySuccessWithBlockToken() throws Exception { OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); byte[] fileContent; - try(OzoneInputStream is = bucket.readKey(keyName)) { + try (OzoneInputStream is = bucket.readKey(keyName)) { fileContent = new byte[value.getBytes(UTF_8).length]; is.read(fileContent); } Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, ReplicationType.STAND_ALONE, + keyName, ReplicationType.RATIS, ReplicationFactor.ONE)); Assert.assertEquals(value, new String(fileContent, UTF_8)); Assert.assertFalse(key.getCreationTime().isBefore(testStartTime)); @@ -203,7 +203,7 @@ public void testKeyOpFailureWithoutBlockToken() throws Exception { String keyName = UUID.randomUUID().toString(); try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE, + value.getBytes(UTF_8).length, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>())) { LambdaTestUtils.intercept(IOException.class, "UNAUTHENTICATED: Fail " + "to find any token ", @@ -342,7 +342,7 @@ public void testZReadKeyWithUnhealthyContainerReplica() { */ @AfterClass public static void shutdown() throws IOException { - if(ozClient != null) { + if (ozClient != null) { ozClient.close(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java index d6821c4aa65..aa6ff93d514 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java @@ -364,7 +364,7 @@ public void testWatchForCommitForGroupMismatchException() throws Exception { .watchForCommit(reply.getLogIndex() + new Random().nextInt(100) + 10); Assert.fail("Expected exception not thrown"); - } catch(Exception e) { + } catch (Exception e) { Assert.assertTrue(HddsClientUtils .checkForException(e) instanceof GroupMismatchException); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java index 9248a8a68c3..c39598e3c24 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.scm.storage.BlockInputStream; import org.apache.hadoop.hdds.scm.storage.ChunkInputStream; import org.apache.hadoop.ozone.client.io.KeyInputStream; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.junit.Assert; import 
org.junit.Test; @@ -31,7 +31,7 @@ */ public class TestChunkInputStream extends TestInputStreamBase { - public TestChunkInputStream(ChunkLayOutVersion layout) { + public TestChunkInputStream(ContainerLayoutVersion layout) { super(layout); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java index 7a237151fd5..7f0ab38f9f3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java @@ -37,8 +37,8 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.TestHelper; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; -import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; +import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -67,7 +67,7 @@ public abstract class TestInputStreamBase { private String bucketName; private String keyString; - private ChunkLayOutVersion chunkLayout; + private ContainerLayoutVersion containerLayout; private static final Random RAND = new Random(); protected static final int CHUNK_SIZE = 1024 * 1024; // 1MB @@ -81,11 +81,11 @@ public abstract class TestInputStreamBase { @Parameterized.Parameters public static Iterable parameters() { - return ChunkLayoutTestInfo.chunkLayoutParameters(); + return ContainerLayoutTestInfo.containerLayoutParameters(); } - public TestInputStreamBase(ChunkLayOutVersion layout) { - this.chunkLayout = layout; + public TestInputStreamBase(ContainerLayoutVersion layout) { + this.containerLayout = layout; } /** @@ -105,7 +105,8 @@ public void init() throws Exception { conf.setQuietMode(false); conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 64, StorageUnit.MB); - conf.set(ScmConfigKeys.OZONE_SCM_CHUNK_LAYOUT_KEY, chunkLayout.toString()); + conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY, + containerLayout.toString()); ReplicationManagerConfiguration repConf = conf.getObject(ReplicationManagerConfiguration.class); @@ -197,7 +198,7 @@ void validateData(byte[] inputData, int offset, byte[] readData) { byte[] expectedData = new byte[readDataLen]; System.arraycopy(inputData, (int) offset, expectedData, 0, readDataLen); - for (int i=0; i < readDataLen; i++) { + for (int i = 0; i < readDataLen; i++) { Assert.assertEquals("Read data at does not match the input data at " + "position " + (offset + i), expectedData[i], readData[i]); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java index 3af15d3259d..4b83429e153 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java @@ -38,7 +38,7 @@ import org.apache.hadoop.ozone.client.io.KeyInputStream; import org.apache.hadoop.ozone.common.utils.BufferUtils; import 
org.apache.hadoop.ozone.container.TestHelper; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -62,7 +62,7 @@ public class TestKeyInputStream extends TestInputStreamBase { private static final Logger LOG = LoggerFactory.getLogger(TestKeyInputStream.class); - public TestKeyInputStream(ChunkLayOutVersion layout) { + public TestKeyInputStream(ContainerLayoutVersion layout) { super(layout); } @@ -77,18 +77,18 @@ public TestKeyInputStream(ChunkLayOutVersion layout) { private void randomSeek(int dataLength, KeyInputStream keyInputStream, byte[] inputData) throws Exception { // Do random seek. - for (int i=0; i=100; i-=20) { + for (int i = dataLength - 100; i >= 100; i -= 20) { validate(keyInputStream, inputData, i, 20); } // Start from begin and seek such that we read partially chunks. - for (int i=0; i()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 8977f9e83dd..8978350ad2a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -113,6 +113,7 @@ public class TestBlockDeletion { @Before public void init() throws Exception { conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); GenericTestUtils.setLogLevel(DeletedBlockLogImpl.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(ReplicationManager.LOG, Level.DEBUG); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index 3382180b32b..d629f2f6cad 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -263,7 +263,7 @@ public void testCloseContainerViaRatis() throws IOException, Container dnContainer = cluster.getHddsDatanodes().get(index) .getDatanodeStateMachine().getContainer().getContainerSet() .getContainer(containerID); - try(ReferenceCountedDB store = BlockUtils.getDB( + try (ReferenceCountedDB store = BlockUtils.getDB( (KeyValueContainerData) dnContainer.getContainerData(), conf)) { metadataStores.add(store); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java index cb85161eec0..9b0ccd58567 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java @@ -77,6 +77,8 @@ public void setup() throws Exception { conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1).build(); + cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(ONE, 30000); } @After @@ -96,7 +98,7 @@ public void test() throws Exception { objectStore.createVolume("test"); objectStore.getVolume("test").createBucket("test"); OzoneOutputStream key = objectStore.getVolume("test").getBucket("test") - .createKey("test", 1024, ReplicationType.STAND_ALONE, + .createKey("test", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); key.write("test".getBytes(UTF_8)); key.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java index e3eccb5ff63..d298ac5cc8f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java @@ -87,6 +87,7 @@ public static void setup() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1).build(); cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(ONE, 30000); OzoneClient client = OzoneClientFactory.getRpcClient(conf); objectStore = client.getObjectStore(); @@ -243,7 +244,7 @@ public void testDeleteContainerRequestHandlerOnOpenContainer() private void createKey(String keyName) throws IOException { OzoneOutputStream key = objectStore.getVolume(volumeName) .getBucket(bucketName) - .createKey(keyName, 1024, ReplicationType.STAND_ALONE, + .createKey(keyName, 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); key.write("test".getBytes(UTF_8)); key.close(); @@ -281,7 +282,7 @@ private ContainerID getContainerID(String keyName) throws IOException { private Boolean isContainerClosed(HddsDatanodeService hddsDatanodeService, long containerID) { ContainerData containerData; - containerData =hddsDatanodeService + containerData = hddsDatanodeService .getDatanodeStateMachine().getContainer().getContainerSet() .getContainer(containerID).getContainerData(); return !containerData.isOpen(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java index 61559fb0847..fb015bb1225 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java @@ -40,7 +40,6 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import 
org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; @@ -113,7 +112,7 @@ public void testContainerMetrics() throws Exception { Handler.getHandlerForContainerType(containerType, conf, context.getParent().getDatanodeDetails().getUuidString(), containerSet, volumeSet, metrics, - TestHddsDispatcher.NO_OP_ICR_SENDER)); + c -> { })); } HddsDispatcher dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, context, metrics, null); @@ -183,7 +182,7 @@ public void testContainerMetrics() throws Exception { } // clean up volume dir File file = new File(path); - if(file.exists()) { + if (file.exists()) { FileUtil.fullyDelete(file); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 4f0c437d68e..c7bf1f858d9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -487,7 +487,7 @@ static void runAsyncTests( final List computeResults = new LinkedList<>(); int requestCount = 1000; // Create a bunch of Async calls from this test. - for(int x = 0; x { })); } HddsDispatcher dispatcher = new HddsDispatcher( conf, containerSet, volumeSet, handlers, context, metrics, null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index b15bb67fad8..6ff49c69f1d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -56,7 +56,6 @@ import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; @@ -170,7 +169,7 @@ public void testClientServer() throws Exception { .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue()), XceiverClientGrpc::new, (dn, conf) -> new XceiverServerGrpc(dd, conf, - hddsDispatcher, caClient), (dn, p) -> {}, (p) -> {}); + hddsDispatcher, caClient), (dn, p) -> { }, (p) -> { }); } private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, @@ -195,7 +194,7 @@ private static HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId, Handler.getHandlerForContainerType(containerType, conf, dd.getUuid().toString(), containerSet, volumeSet, metrics, - TestHddsDispatcher.NO_OP_ICR_SENDER)); + c -> { })); } HddsDispatcher hddsDispatcher = new 
HddsDispatcher( conf, containerSet, volumeSet, handlers, context, metrics, @@ -235,7 +234,7 @@ private static void runTestClientServerRatis(RpcType rpc, int numNodes) XceiverClientRatis::newXceiverClientRatis, TestSecureContainerServer::newXceiverServerRatis, (dn, p) -> RatisTestHelper.initXceiverServerRatis(rpc, dn, p), - (p) -> {}); + (p) -> { }); } private static void runTestClientServer( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index efbb73786c4..2910fa48758 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -24,6 +24,7 @@ import java.util.UUID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.segmentparser.DatanodeRatisLogParser; @@ -56,6 +57,7 @@ public class TestDnRatisLogParser { @Before public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1).setTotalPipelineNumLimit(2).build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java index 658746a5c0c..898119f2ef1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java @@ -70,7 +70,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; +import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; /** * This class tests the data scrubber functionality. 
@@ -101,6 +101,7 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(1) .build(); cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); ozClient = OzoneClientFactory.getRpcClient(ozoneConfig); store = ozClient.getObjectStore(); ozoneManager = cluster.getOzoneManager(); @@ -137,7 +138,7 @@ public void testOpenContainerIntegrity() throws Exception { String keyName = UUID.randomUUID().toString(); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes(UTF_8).length, STAND_ALONE, + value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>()); out.write(value.getBytes(UTF_8)); out.close(); @@ -147,7 +148,7 @@ public void testOpenContainerIntegrity() throws Exception { byte[] fileContent = new byte[value.getBytes(UTF_8).length]; is.read(fileContent); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, - keyName, STAND_ALONE, + keyName, RATIS, ONE)); Assert.assertEquals(value, new String(fileContent, UTF_8)); Assert.assertFalse(key.getCreationTime().isBefore(testStartTime)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java index 2a7fbe826de..2a5873071a5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/volume/TestDatanodeHddsVolumeFailureDetection.java @@ -62,7 +62,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; +import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE; @@ -109,6 +109,7 @@ public void init() throws Exception { .setNumDataVolumes(1) .build(); cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); ozClient = OzoneClientFactory.getRpcClient(ozoneConfig); store = ozClient.getObjectStore(); @@ -141,7 +142,7 @@ public void testHddsVolumeFailureOnChunkFileCorrupt() throws Exception { String keyName = UUID.randomUUID().toString(); String value = "sample value"; OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes(UTF_8).length, STAND_ALONE, + value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>()); out.write(value.getBytes(UTF_8)); out.close(); @@ -223,7 +224,7 @@ public void testHddsVolumeFailureOnContainerFileCorrupt() throws Exception { try { c1.close(); Assert.fail(); - } catch(Exception e) { + } catch (Exception e) { Assert.assertTrue(e instanceof IOException); } @@ -243,7 +244,7 @@ public void testHddsVolumeFailureOnDbFileCorrupt() throws Exception { String keyName = UUID.randomUUID().toString(); String value = "sample value"; OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes(UTF_8).length, STAND_ALONE, + value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>()); out.write(value.getBytes(UTF_8)); out.close(); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorWithFSO.java index 497cdc1eb4f..42905e2a408 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopDirTreeGeneratorWithFSO.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.freon; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; /** * Test for HadoopDirTreeGenerator - prefix layout. @@ -27,7 +27,7 @@ public class TestHadoopDirTreeGeneratorWithFSO protected OzoneConfiguration getOzoneConfiguration() { OzoneConfiguration conf = new OzoneConfiguration(); - TestOMRequestUtils.configureFSOptimizedPaths(conf, true); + OMRequestTestUtils.configureFSOptimizedPaths(conf, true); return conf; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java index a01d4928fca..f67783bddb1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java @@ -152,28 +152,28 @@ private Path depthBFS(FileSystem fs, FileStatus[] fileStatuses, int span, int actualDepth) throws IOException { int depth = 0; Path p = null; - if(span > 0){ + if (span > 0) { depth = 0; - } else if(span == 0){ + } else if (span == 0) { depth = 1; - } else{ + } else { LOG.info("Span value can never be negative"); } LinkedList queue = new LinkedList(); FileStatus f1 = fileStatuses[0]; queue.add(f1); - while(queue.size() != 0){ + while (queue.size() != 0) { FileStatus f = queue.poll(); FileStatus[] temp = fs.listStatus(f.getPath()); - if(temp.length > 0){ + if (temp.length > 0) { ++depth; - for(int i = 0; i < temp.length; i++){ + for (int i = 0; i < temp.length; i++) { queue.add(temp[i]); } } - if(span == 0){ + if (span == 0) { p = f.getPath(); - } else{ + } else { p = f.getPath().getParent(); } } @@ -188,17 +188,17 @@ private Path depthBFS(FileSystem fs, FileStatus[] fileStatuses, * and count the span directories. 
*/ - private int spanCheck(FileSystem fs, int span, Path p) throws IOException{ + private int spanCheck(FileSystem fs, int span, Path p) throws IOException { int sp = 0; int depth = 0; - if(span >= 0){ + if (span >= 0) { depth = 0; - } else{ + } else { LOG.info("Span value can never be negative"); } FileStatus[] fileStatuses = fs.listStatus(p); - for (FileStatus fileStatus : fileStatuses){ - if(fileStatus.isDirectory()){ + for (FileStatus fileStatus : fileStatuses) { + if (fileStatus.isDirectory()) { ++sp; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java index 46781767684..9104d987c2f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java @@ -107,7 +107,7 @@ public void testContainerReportKeyWrite() throws Exception { objectStore.getVolume(volumeName).createBucket(bucketName); OzoneOutputStream key = objectStore.getVolume(volumeName).getBucket(bucketName) - .createKey(keyName, keySize, ReplicationType.STAND_ALONE, + .createKey(keyName, keySize, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); String dataString = RandomStringUtils.randomAlphabetic(keySize); key.write(dataString.getBytes(UTF_8)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 1af0b43e7f0..68cc8242873 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -45,7 +45,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; @@ -67,7 +68,6 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -83,7 +83,8 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; @@ -152,7 +153,11 @@ public class TestKeyManagerImpl { private 
static long scmBlockSize; private static final String KEY_NAME = "key1"; private static final String BUCKET_NAME = "bucket1"; + private static final String VERSIONED_BUCKET_NAME = "versionedBucket1"; private static final String VOLUME_NAME = "vol1"; + private static OzoneManagerProtocol writeClient; + private static OzoneManager om; + @Rule public ExpectedException exception = ExpectedException.none(); @@ -164,7 +169,6 @@ public static void setUp() throws Exception { conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true"); mockScmBlockLocationProtocol = mock(ScmBlockLocationProtocol.class); - metadataManager = new OmMetadataManagerImpl(conf); nodeManager = new MockNodeManager(true, 10); NodeSchema[] schemas = new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA}; @@ -181,7 +185,7 @@ public static void setUp() throws Exception { configurator.setNetworkTopology(clusterMap); configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); configurator.setScmContext(SCMContext.emptyContext()); - scm = TestUtils.getScm(conf, configurator); + scm = HddsTestUtils.getScm(conf, configurator); scm.start(); scm.exitSafeMode(); scmBlockSize = (long) conf @@ -191,10 +195,17 @@ public static void setUp() throws Exception { mockScmContainerClient = Mockito.mock(StorageContainerLocationProtocol.class); - keyManager = - new KeyManagerImpl(scm.getBlockProtocolServer(), - mockScmContainerClient, metadataManager, conf, "om1", null); - prefixManager = new PrefixManagerImpl(metadataManager, false); + + OmTestManagers omTestManagers + = new OmTestManagers(conf, scm.getBlockProtocolServer(), + mockScmContainerClient); + om = omTestManagers.getOzoneManager(); + metadataManager = omTestManagers.getMetadataManager(); + keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); + prefixManager = omTestManagers.getPrefixManager(); + writeClient = omTestManagers.getWriteClient(); + + mockContainerClient(); Mockito.when(mockScmBlockLocationProtocol .allocateBlock(Mockito.anyLong(), Mockito.anyInt(), @@ -204,42 +215,60 @@ public static void setUp() throws Exception { new SCMException("SafeModePrecheck failed for allocateBlock", ResultCodes.SAFE_MODE_EXCEPTION)); createVolume(VOLUME_NAME); - createBucket(VOLUME_NAME, BUCKET_NAME); + createBucket(VOLUME_NAME, BUCKET_NAME, false); + createBucket(VOLUME_NAME, VERSIONED_BUCKET_NAME, true); } @AfterClass public static void cleanup() throws Exception { scm.stop(); scm.join(); - metadataManager.stop(); - keyManager.stop(); + om.stop(); FileUtils.deleteDirectory(dir); } @After public void cleanupTest() throws IOException { + mockContainerClient(); List fileStatuses = keyManager .listStatus(createBuilder().setKeyName("").build(), true, "", 100000); for (OzoneFileStatus fileStatus : fileStatuses) { if (fileStatus.isFile()) { - keyManager.deleteKey( + writeClient.deleteKey( createKeyArgs(fileStatus.getKeyInfo().getKeyName())); } else { - keyManager.deleteKey(createKeyArgs(OzoneFSUtils + writeClient.deleteKey(createKeyArgs(OzoneFSUtils .addTrailingSlashIfNeeded( fileStatus.getKeyInfo().getKeyName()))); } } } - private static void createBucket(String volumeName, String bucketName) + private static void mockContainerClient() { + ScmClient scmClient = new ScmClient(scm.getBlockProtocolServer(), + mockScmContainerClient); + HddsWhiteboxTestUtils.setInternalState(keyManager, + "scmClient", scmClient); + HddsWhiteboxTestUtils.setInternalState(om, + "scmClient", scmClient); + } + private static void 
mockBlockClient() { + ScmClient scmClient = new ScmClient(mockScmBlockLocationProtocol, null); + HddsWhiteboxTestUtils.setInternalState(keyManager, + "scmClient", scmClient); + HddsWhiteboxTestUtils.setInternalState(om, + "scmClient", scmClient); + } + private static void createBucket(String volumeName, String bucketName, + boolean isVersionEnabled) throws IOException { OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName(volumeName) .setBucketName(bucketName) + .setIsVersionEnabled(isVersionEnabled) .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); } private static void createVolume(String volumeName) throws IOException { @@ -248,13 +277,12 @@ private static void createVolume(String volumeName) throws IOException { .setAdminName("bilbo") .setOwnerName("bilbo") .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); + OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); } @Test public void allocateBlockFailureInSafeMode() throws Exception { - KeyManager keyManager1 = new KeyManagerImpl(mockScmBlockLocationProtocol, - metadataManager, conf, "om1", null); + mockBlockClient(); OmKeyArgs keyArgs = createBuilder() .setKeyName(KEY_NAME) .build(); @@ -278,7 +306,7 @@ public void allocateBlockFailureInSafeMode() throws Exception { omKeyInfo); LambdaTestUtils.intercept(OMException.class, "SafeModePrecheck failed for allocateBlock", () -> { - keyManager1 + writeClient .allocateBlock(keyArgs, 1L, new ExcludeList()); }); } @@ -286,8 +314,7 @@ public void allocateBlockFailureInSafeMode() throws Exception { @Test public void openKeyFailureInSafeMode() throws Exception { UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - KeyManager keyManager1 = new KeyManagerImpl(mockScmBlockLocationProtocol, - metadataManager, conf, "om1", null); + mockBlockClient(); OmKeyArgs keyArgs = createBuilder() .setKeyName(KEY_NAME) .setDataSize(1000) @@ -297,7 +324,7 @@ public void openKeyFailureInSafeMode() throws Exception { .build(); LambdaTestUtils.intercept(OMException.class, "SafeModePrecheck failed for allocateBlock", () -> { - keyManager1.openKey(keyArgs); + writeClient.openKey(keyArgs); }); } @@ -307,7 +334,7 @@ public void openKeyWithMultipleBlocks() throws IOException { .setKeyName(UUID.randomUUID().toString()) .setDataSize(scmBlockSize * 10) .build(); - OpenKeySession keySession = keyManager.openKey(keyArgs); + OpenKeySession keySession = writeClient.openKey(keyArgs); OmKeyInfo keyInfo = keySession.getKeyInfo(); Assert.assertEquals(10, keyInfo.getLatestVersionLocations().getLocationList().size()); @@ -321,11 +348,11 @@ public void testCreateDirectory() throws IOException { OmKeyArgs keyArgs = createBuilder() .setKeyName(keyNameBuf.toString()) .build(); - for (int i =0; i< 5; i++) { + for (int i = 0; i < 5; i++) { keyNameBuf.append("/").append(RandomStringUtils.randomAlphabetic(5)); } String keyName = keyNameBuf.toString(); - keyManager.createDirectory(keyArgs); + writeClient.createDirectory(keyArgs); Path path = Paths.get(keyName); while (path != null) { // verify parent directories are created @@ -338,32 +365,24 @@ public void testCreateDirectory() throws IOException { keyArgs = createBuilder() .setKeyName(keyName) .build(); - OpenKeySession keySession = keyManager.openKey(keyArgs); + OpenKeySession keySession = writeClient.openKey(keyArgs); keyArgs.setLocationInfoList( keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); - 
keyManager.commitKey(keyArgs, keySession.getId()); + writeClient.commitKey(keyArgs, keySession.getId()); try { - keyManager.createDirectory(keyArgs); + writeClient.createDirectory(keyArgs); Assert.fail("Creation should fail for directory."); } catch (OMException e) { Assert.assertEquals(e.getResult(), OMException.ResultCodes.FILE_ALREADY_EXISTS); } - // create directory for root directory - keyName = ""; - keyArgs = createBuilder() - .setKeyName(keyName) - .build(); - keyManager.createDirectory(keyArgs); - Assert.assertTrue(keyManager.getFileStatus(keyArgs).isDirectory()); - // create directory where parent is root keyName = RandomStringUtils.randomAlphabetic(5); keyArgs = createBuilder() .setKeyName(keyName) .build(); - keyManager.createDirectory(keyArgs); + writeClient.createDirectory(keyArgs); OzoneFileStatus fileStatus = keyManager.getFileStatus(keyArgs); Assert.assertTrue(fileStatus.isDirectory()); Assert.assertTrue(fileStatus.getKeyInfo().getKeyLocationVersions().get(0) @@ -377,14 +396,14 @@ public void testOpenFile() throws IOException { OmKeyArgs keyArgs = createBuilder() .setKeyName(keyName) .build(); - OpenKeySession keySession = keyManager.createFile(keyArgs, false, false); + OpenKeySession keySession = writeClient.createFile(keyArgs, false, false); keyArgs.setLocationInfoList( keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); + writeClient.commitKey(keyArgs, keySession.getId()); // try to open created key with overWrite flag set to false try { - keyManager.createFile(keyArgs, false, false); + writeClient.createFile(keyArgs, false, false); Assert.fail("Open key should fail for non overwrite create"); } catch (OMException ex) { if (ex.getResult() != OMException.ResultCodes.FILE_ALREADY_EXISTS) { @@ -393,13 +412,13 @@ public void testOpenFile() throws IOException { } // create file should pass with overwrite flag set to true - keyManager.createFile(keyArgs, true, false); + writeClient.createFile(keyArgs, true, false); // try to create a file where parent directories do not exist and // recursive flag is set to false StringBuffer keyNameBuf = new StringBuffer(); keyNameBuf.append(RandomStringUtils.randomAlphabetic(5)); - for (int i =0; i< 5; i++) { + for (int i = 0; i < 5; i++) { keyNameBuf.append("/").append(RandomStringUtils.randomAlphabetic(5)); } keyName = keyNameBuf.toString(); @@ -407,7 +426,7 @@ public void testOpenFile() throws IOException { .setKeyName(keyName) .build(); try { - keyManager.createFile(keyArgs, false, false); + writeClient.createFile(keyArgs, false, false); Assert.fail("Open file should fail for non recursive write"); } catch (OMException ex) { if (ex.getResult() != OMException.ResultCodes.DIRECTORY_NOT_FOUND) { @@ -416,10 +435,10 @@ public void testOpenFile() throws IOException { } // file create should pass when recursive flag is set to true - keySession = keyManager.createFile(keyArgs, false, true); + keySession = writeClient.createFile(keyArgs, false, true); keyArgs.setLocationInfoList( keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); + writeClient.commitKey(keyArgs, keySession.getId()); Assert.assertTrue(keyManager .getFileStatus(keyArgs).isFile()); @@ -428,7 +447,7 @@ public void testOpenFile() throws IOException { .setKeyName("") .build(); try { - keyManager.createFile(keyArgs, true, true); + writeClient.createFile(keyArgs, true, true); Assert.fail("Open file should fail for non recursive write"); } 
catch (OMException ex) { if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) { @@ -442,10 +461,10 @@ public void testCheckAccessForFileKey() throws Exception { OmKeyArgs keyArgs = createBuilder() .setKeyName("testdir/deep/NOTICE.txt") .build(); - OpenKeySession keySession = keyManager.createFile(keyArgs, false, true); + OpenKeySession keySession = writeClient.createFile(keyArgs, false, true); keyArgs.setLocationInfoList( keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); + writeClient.commitKey(keyArgs, keySession.getId()); OzoneObj fileKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs) .setStoreType(OzoneObj.StoreType.OZONE) @@ -453,11 +472,6 @@ public void testCheckAccessForFileKey() throws Exception { RequestContext context = currentUserReads(); Assert.assertTrue(keyManager.checkAccess(fileKey, context)); - OzoneObj parentDirKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs) - .setStoreType(OzoneObj.StoreType.OZONE) - .setKeyName("testdir") - .build(); - Assert.assertTrue(keyManager.checkAccess(parentDirKey, context)); } @Test @@ -477,7 +491,7 @@ public void testCheckAccessForDirectoryKey() throws Exception { OmKeyArgs keyArgs = createBuilder() .setKeyName("some/dir") .build(); - keyManager.createDirectory(keyArgs); + writeClient.createDirectory(keyArgs); OzoneObj dirKey = OzoneObjInfo.Builder.fromKeyArgs(keyArgs) .setStoreType(OzoneObj.StoreType.OZONE) @@ -501,9 +515,9 @@ public void testPrefixAclOps() throws IOException { OzoneAcl ozAcl1 = new OzoneAcl(ACLIdentityType.USER, "user1", ACLType.READ, ACCESS); - prefixManager.addAcl(ozPrefix1, ozAcl1); + writeClient.addAcl(ozPrefix1, ozAcl1); - List ozAclGet = prefixManager.getAcl(ozPrefix1); + List ozAclGet = writeClient.getAcl(ozPrefix1); Assert.assertEquals(1, ozAclGet.size()); Assert.assertEquals(ozAcl1, ozAclGet.get(0)); @@ -530,8 +544,8 @@ public void testPrefixAclOps() throws IOException { acls.add(ozAcl2); acls.add(ozAcl3); - prefixManager.setAcl(ozPrefix1, acls); - ozAclGet = prefixManager.getAcl(ozPrefix1); + writeClient.setAcl(ozPrefix1, acls); + ozAclGet = writeClient.getAcl(ozPrefix1); Assert.assertEquals(2, ozAclGet.size()); int matchEntries = 0; @@ -547,27 +561,27 @@ public void testPrefixAclOps() throws IOException { } Assert.assertEquals(2, matchEntries); - boolean result = prefixManager.removeAcl(ozPrefix1, ozAcl4); + boolean result = writeClient.removeAcl(ozPrefix1, ozAcl4); Assert.assertEquals(true, result); - ozAclGet = prefixManager.getAcl(ozPrefix1); + ozAclGet = writeClient.getAcl(ozPrefix1); Assert.assertEquals(2, ozAclGet.size()); - result = prefixManager.removeAcl(ozPrefix1, ozAcl3); + result = writeClient.removeAcl(ozPrefix1, ozAcl3); Assert.assertEquals(true, result); - ozAclGet = prefixManager.getAcl(ozPrefix1); + ozAclGet = writeClient.getAcl(ozPrefix1); Assert.assertEquals(1, ozAclGet.size()); Assert.assertEquals(ozAcl2, ozAclGet.get(0)); // add dev:w - prefixManager.addAcl(ozPrefix1, ozAcl4); - ozAclGet = prefixManager.getAcl(ozPrefix1); + writeClient.addAcl(ozPrefix1, ozAcl4); + ozAclGet = writeClient.getAcl(ozPrefix1); Assert.assertEquals(2, ozAclGet.size()); // add dev:r and validate the acl bitset combined - prefixManager.addAcl(ozPrefix1, ozAcl5); - ozAclGet = prefixManager.getAcl(ozPrefix1); + writeClient.addAcl(ozPrefix1, ozAcl5); + ozAclGet = writeClient.getAcl(ozPrefix1); Assert.assertEquals(2, ozAclGet.size()); matchEntries = 0; @@ -708,10 +722,10 @@ public void testLookupFile() throws IOException { } // create a file - 
OpenKeySession keySession = keyManager.createFile(keyArgs, false, false); + OpenKeySession keySession = writeClient.createFile(keyArgs, false, false); keyArgs.setLocationInfoList( keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); + writeClient.commitKey(keyArgs, keySession.getId()); Assert.assertEquals(keyManager.lookupFile(keyArgs, null).getKeyName(), keyName); @@ -752,7 +766,7 @@ public void testLookupKeyWithLocation() throws IOException { } // create a key - OpenKeySession keySession = keyManager.createFile(keyArgs, false, false); + OpenKeySession keySession = writeClient.createFile(keyArgs, false, false); // randomly select 3 datanodes List nodeList = new ArrayList<>(); nodeList.add((DatanodeDetails)scm.getClusterMap().getNode( @@ -776,7 +790,7 @@ public void testLookupKeyWithLocation() throws IOException { locationList.get(0).getLocalID())).build()); keyArgs.setLocationInfoList(locationInfoList); - keyManager.commitKey(keyArgs, keySession.getId()); + writeClient.commitKey(keyArgs, keySession.getId()); ContainerInfo containerInfo = new ContainerInfo.Builder().setContainerID(1L) .setPipelineID(pipeline.getId()).build(); List containerWithPipelines = Arrays.asList( @@ -822,7 +836,7 @@ public void testLookupKeyWithLocation() throws IOException { @Test public void testLatestLocationVersion() throws IOException { String keyName = RandomStringUtils.randomAlphabetic(5); - OmKeyArgs keyArgs = createBuilder() + OmKeyArgs keyArgs = createBuilder(VERSIONED_BUCKET_NAME) .setKeyName(keyName) .setLatestVersionLocation(true) .build(); @@ -838,7 +852,7 @@ public void testLatestLocationVersion() throws IOException { } // create a key - OpenKeySession keySession = keyManager.createFile(keyArgs, false, false); + OpenKeySession keySession = writeClient.createFile(keyArgs, false, false); // randomly select 3 datanodes List nodeList = new ArrayList<>(); nodeList.add((DatanodeDetails)scm.getClusterMap().getNode( @@ -862,12 +876,20 @@ public void testLatestLocationVersion() throws IOException { locationList.get(0).getLocalID())).build()); keyArgs.setLocationInfoList(locationInfoList); - keyManager.commitKey(keyArgs, keySession.getId()); + writeClient.commitKey(keyArgs, keySession.getId()); + // Mock out the pipelines from the SCM + ContainerInfo containerInfo = new ContainerInfo.Builder().setContainerID(1L) + .setPipelineID(pipeline.getId()).build(); + List containerWithPipelines = Arrays.asList( + new ContainerWithPipeline(containerInfo, pipeline)); + when(mockScmContainerClient.getContainerWithPipelineBatch( + Arrays.asList(1L))).thenReturn(containerWithPipelines); + OmKeyInfo key = keyManager.lookupKey(keyArgs, null); Assert.assertEquals(key.getKeyLocationVersions().size(), 1); - keySession = keyManager.createFile(keyArgs, true, true); - keyManager.commitKey(keyArgs, keySession.getId()); + keySession = writeClient.createFile(keyArgs, true, true); + writeClient.commitKey(keyArgs, keySession.getId()); // Test lookupKey (latestLocationVersion == true) key = keyManager.lookupKey(keyArgs, null); @@ -889,7 +911,7 @@ public void testLatestLocationVersion() throws IOException { key = keyManager.lookupFile(keyArgs, null); Assert.assertEquals(key.getKeyLocationVersions().size(), 1); - keyArgs = createBuilder() + keyArgs = createBuilder(VERSIONED_BUCKET_NAME) .setKeyName(keyName) .setLatestVersionLocation(false) .build(); @@ -929,12 +951,12 @@ public void testListStatusWithTableCache() throws Exception { // Add a total of 100 key entries to DB 
and TableCache (50 entries each) for (int i = 1; i <= 100; i++) { if (i % 2 == 0) { // Add to DB - TestOMRequestUtils.addKeyToTable(false, + OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKeyInDB + i, 1000L, HddsProtos.ReplicationType.RATIS, ONE, metadataManager); } else { // Add to TableCache - TestOMRequestUtils.addKeyToTableCache( + OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKeyInCache + i, HddsProtos.ReplicationType.RATIS, ONE, metadataManager); @@ -972,17 +994,17 @@ public void testListStatusWithTableCacheRecursive() throws Exception { String keyNameDir1 = "dir1"; OmKeyArgs keyArgsDir1 = createBuilder().setKeyName(keyNameDir1).build(); - keyManager.createDirectory(keyArgsDir1); + writeClient.createDirectory(keyArgsDir1); String keyNameDir1Subdir1 = "dir1" + OZONE_URI_DELIMITER + "subdir1"; OmKeyArgs keyArgsDir1Subdir1 = createBuilder().setKeyName(keyNameDir1Subdir1).build(); - keyManager.createDirectory(keyArgsDir1Subdir1); + writeClient.createDirectory(keyArgsDir1Subdir1); String keyNameDir2 = "dir2"; OmKeyArgs keyArgsDir2 = createBuilder().setKeyName(keyNameDir2).build(); - keyManager.createDirectory(keyArgsDir2); + writeClient.createDirectory(keyArgsDir2); OmKeyArgs rootDirArgs = createKeyArgs(""); // Test listStatus with recursive=false, should only have dirs under root @@ -1000,13 +1022,13 @@ public void testListStatusWithTableCacheRecursive() throws Exception { String prefixKeyInCache = "key-c"; for (int i = 1; i <= 10; i++) { if (i % 2 == 0) { // Add to DB - TestOMRequestUtils.addKeyToTable(false, + OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInDB + i, 1000L, HddsProtos.ReplicationType.RATIS, ONE, metadataManager); } else { // Add to TableCache - TestOMRequestUtils.addKeyToTableCache( + OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, keyNameDir1Subdir1 + OZONE_URI_DELIMITER + prefixKeyInCache + i, HddsProtos.ReplicationType.RATIS, ONE, @@ -1045,13 +1067,13 @@ public void testListStatusWithDeletedEntriesInCache() throws Exception { for (int i = 1; i <= 100; i++) { if (i % 2 == 0) { - TestOMRequestUtils.addKeyToTable(false, + OMRequestTestUtils.addKeyToTable(false, VOLUME_NAME, BUCKET_NAME, prefixKey + i, 1000L, HddsProtos.ReplicationType.RATIS, ONE, metadataManager); existKeySet.add(prefixKey + i); } else { - TestOMRequestUtils.addKeyToTableCache( + OMRequestTestUtils.addKeyToTableCache( VOLUME_NAME, BUCKET_NAME, prefixKey + i, HddsProtos.ReplicationType.RATIS, ONE, metadataManager); @@ -1238,123 +1260,113 @@ public void testListStatus() throws IOException { @Test public void testRefreshPipeline() throws Exception { - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).build(); - try { - cluster.waitForClusterToBeReady(); - OzoneManager ozoneManager = cluster.getOzoneManager(); + OzoneManager ozoneManager = om; - StorageContainerLocationProtocol sclProtocolMock = mock( - StorageContainerLocationProtocol.class); + StorageContainerLocationProtocol sclProtocolMock = mock( + StorageContainerLocationProtocol.class); - List containerIDs = new ArrayList<>(); - containerIDs.add(100L); - containerIDs.add(200L); + List containerIDs = new ArrayList<>(); + containerIDs.add(100L); + containerIDs.add(200L); - List cps = new ArrayList<>(); - for (Long containerID : containerIDs) { - ContainerWithPipeline containerWithPipelineMock = - mock(ContainerWithPipeline.class); - when(containerWithPipelineMock.getPipeline()) - 
.thenReturn(getRandomPipeline()); + List cps = new ArrayList<>(); + for (Long containerID : containerIDs) { + ContainerWithPipeline containerWithPipelineMock = + mock(ContainerWithPipeline.class); + when(containerWithPipelineMock.getPipeline()) + .thenReturn(getRandomPipeline()); - ContainerInfo ci = mock(ContainerInfo.class); - when(ci.getContainerID()).thenReturn(containerID); - when(containerWithPipelineMock.getContainerInfo()).thenReturn(ci); + ContainerInfo ci = mock(ContainerInfo.class); + when(ci.getContainerID()).thenReturn(containerID); + when(containerWithPipelineMock.getContainerInfo()).thenReturn(ci); - cps.add(containerWithPipelineMock); - } + cps.add(containerWithPipelineMock); + } - when(sclProtocolMock.getContainerWithPipelineBatch(containerIDs)) - .thenReturn(cps); + when(sclProtocolMock.getContainerWithPipelineBatch(containerIDs)) + .thenReturn(cps); - ScmClient scmClientMock = mock(ScmClient.class); - when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock); + ScmClient scmClientMock = mock(ScmClient.class); + when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock); - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo("v1", - "b1", "k1", ReplicationType.RATIS, - ReplicationFactor.THREE); + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", + "b1", "k1", ReplicationType.RATIS, + ReplicationFactor.THREE); - // Add block to key. - List omKeyLocationInfoList = new ArrayList<>(); - Pipeline pipeline = getRandomPipeline(); + // Add block to key. + List omKeyLocationInfoList = new ArrayList<>(); + Pipeline pipeline = getRandomPipeline(); - OmKeyLocationInfo omKeyLocationInfo = - new OmKeyLocationInfo.Builder().setBlockID( - new BlockID(100L, 1000L)) - .setOffset(0).setLength(100L).setPipeline(pipeline).build(); + OmKeyLocationInfo omKeyLocationInfo = + new OmKeyLocationInfo.Builder().setBlockID( + new BlockID(100L, 1000L)) + .setOffset(0).setLength(100L).setPipeline(pipeline).build(); - omKeyLocationInfoList.add(omKeyLocationInfo); + omKeyLocationInfoList.add(omKeyLocationInfo); - OmKeyLocationInfo omKeyLocationInfo2 = - new OmKeyLocationInfo.Builder().setBlockID( - new BlockID(200L, 1000L)) - .setOffset(0).setLength(100L).setPipeline(pipeline).build(); - omKeyLocationInfoList.add(omKeyLocationInfo2); + OmKeyLocationInfo omKeyLocationInfo2 = + new OmKeyLocationInfo.Builder().setBlockID( + new BlockID(200L, 1000L)) + .setOffset(0).setLength(100L).setPipeline(pipeline).build(); + omKeyLocationInfoList.add(omKeyLocationInfo2); - OmKeyLocationInfo omKeyLocationInfo3 = - new OmKeyLocationInfo.Builder().setBlockID( - new BlockID(100L, 2000L)) - .setOffset(0).setLength(100L).setPipeline(pipeline).build(); - omKeyLocationInfoList.add(omKeyLocationInfo3); + OmKeyLocationInfo omKeyLocationInfo3 = + new OmKeyLocationInfo.Builder().setBlockID( + new BlockID(100L, 2000L)) + .setOffset(0).setLength(100L).setPipeline(pipeline).build(); + omKeyLocationInfoList.add(omKeyLocationInfo3); - omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false); + omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false); - KeyManagerImpl keyManagerImpl = - new KeyManagerImpl(ozoneManager, scmClientMock, conf, "om1"); + KeyManagerImpl keyManagerImpl = + new KeyManagerImpl(ozoneManager, scmClientMock, conf, "om1"); - keyManagerImpl.refresh(omKeyInfo); + keyManagerImpl.refresh(omKeyInfo); + + verify(sclProtocolMock, times(1)) + .getContainerWithPipelineBatch(containerIDs); - verify(sclProtocolMock, times(1)) - .getContainerWithPipelineBatch(containerIDs); - } finally { - 
cluster.shutdown(); - } } @Test public void testRefreshPipelineException() throws Exception { - MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).build(); + + OzoneManager ozoneManager = om; + + String errorMessage = "Cannot find container!!"; + StorageContainerLocationProtocol sclProtocolMock = mock( + StorageContainerLocationProtocol.class); + doThrow(new IOException(errorMessage)).when(sclProtocolMock) + .getContainerWithPipelineBatch(anyList()); + + ScmClient scmClientMock = mock(ScmClient.class); + when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock); + + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo("v1", + "b1", "k1", ReplicationType.RATIS, + ReplicationFactor.THREE); + + // Add block to key. + List omKeyLocationInfoList = new ArrayList<>(); + Pipeline pipeline = getRandomPipeline(); + + OmKeyLocationInfo omKeyLocationInfo = + new OmKeyLocationInfo.Builder().setBlockID( + new BlockID(100L, 1000L)) + .setOffset(0).setLength(100L).setPipeline(pipeline).build(); + omKeyLocationInfoList.add(omKeyLocationInfo); + omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false); + + KeyManagerImpl keyManagerImpl = + new KeyManagerImpl(ozoneManager, scmClientMock, conf, "om1"); + try { - cluster.waitForClusterToBeReady(); - OzoneManager ozoneManager = cluster.getOzoneManager(); - - String errorMessage = "Cannot find container!!"; - StorageContainerLocationProtocol sclProtocolMock = mock( - StorageContainerLocationProtocol.class); - doThrow(new IOException(errorMessage)).when(sclProtocolMock) - .getContainerWithPipelineBatch(anyList()); - - ScmClient scmClientMock = mock(ScmClient.class); - when(scmClientMock.getContainerClient()).thenReturn(sclProtocolMock); - - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo("v1", - "b1", "k1", ReplicationType.RATIS, - ReplicationFactor.THREE); - - // Add block to key. 
- List omKeyLocationInfoList = new ArrayList<>(); - Pipeline pipeline = getRandomPipeline(); - - OmKeyLocationInfo omKeyLocationInfo = - new OmKeyLocationInfo.Builder().setBlockID( - new BlockID(100L, 1000L)) - .setOffset(0).setLength(100L).setPipeline(pipeline).build(); - omKeyLocationInfoList.add(omKeyLocationInfo); - omKeyInfo.appendNewBlocks(omKeyLocationInfoList, false); - - KeyManagerImpl keyManagerImpl = - new KeyManagerImpl(ozoneManager, scmClientMock, conf, "om1"); - - try { - keyManagerImpl.refresh(omKeyInfo); - Assert.fail(); - } catch (OMException omEx) { - Assert.assertEquals(SCM_GET_PIPELINE_EXCEPTION, omEx.getResult()); - Assert.assertTrue(omEx.getMessage().equals(errorMessage)); - } - } finally { - cluster.shutdown(); + keyManagerImpl.refresh(omKeyInfo); + Assert.fail(); + } catch (OMException omEx) { + Assert.assertEquals(SCM_GET_PIPELINE_EXCEPTION, omEx.getResult()); + Assert.assertTrue(omEx.getMessage().equals(errorMessage)); } } @@ -1385,7 +1397,7 @@ private void createDepthTwoDirectory(String superDir, int numDirectories, throws IOException { // create super directory OmKeyArgs superDirArgs = createKeyArgs(superDir); - keyManager.createDirectory(superDirArgs); + writeClient.createDirectory(superDirArgs); directorySet.add(superDir); // add directory children to super directory @@ -1455,7 +1467,7 @@ private Set createDirectories(String parent, for (int i = 0; i < numDirectories; i++) { String keyName = parent + "/" + RandomStringUtils.randomAlphabetic(5); OmKeyArgs keyArgs = createBuilder().setKeyName(keyName).build(); - keyManager.createDirectory(keyArgs); + writeClient.createDirectory(keyArgs); keyNames.add(keyName); } directoryMap.put(parent, new ArrayList<>(keyNames)); @@ -1468,11 +1480,11 @@ private List createFiles(String parent, for (int i = 0; i < numFiles; i++) { String keyName = parent + "/" + RandomStringUtils.randomAlphabetic(5); OmKeyArgs keyArgs = createBuilder().setKeyName(keyName).build(); - OpenKeySession keySession = keyManager.createFile(keyArgs, false, false); + OpenKeySession keySession = writeClient.createFile(keyArgs, false, false); keyArgs.setLocationInfoList( keySession.getKeyInfo().getLatestVersionLocations() .getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); + writeClient.commitKey(keyArgs, keySession.getId()); keyNames.add(keyName); } fileMap.put(parent, keyNames); @@ -1480,9 +1492,14 @@ private List createFiles(String parent, } private OmKeyArgs.Builder createBuilder() throws IOException { + return createBuilder(BUCKET_NAME); + } + + private OmKeyArgs.Builder createBuilder(String bucketName) + throws IOException { UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); return new OmKeyArgs.Builder() - .setBucketName(BUCKET_NAME) + .setBucketName(bucketName) .setDataSize(0) .setReplicationConfig( new StandaloneReplicationConfig(ONE)) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java index 232dfab55b4..88f0dca71ab 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java @@ -112,7 +112,7 @@ public void testKeysPurgingByKeyDeletingService() throws Exception { String keyName = keyBase + "-" + i; keys.add(keyName); OzoneOutputStream keyStream = TestHelper.createKey( - keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + 
keyName, ReplicationType.RATIS, ReplicationFactor.ONE, KEY_SIZE, store, volumeName, bucketName); keyStream.write(data); keyStream.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index cd3aee41a95..42bfd11da46 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl; +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClientFactory; @@ -58,7 +58,7 @@ @Timeout(500) public class TestOMRatisSnapshots { - private MiniOzoneOMHAClusterImpl cluster = null; + private MiniOzoneHAClusterImpl cluster = null; private ObjectStore objectStore; private OzoneConfiguration conf; private String clusterId; @@ -89,7 +89,7 @@ public void init() throws Exception { conf.setLong( OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, SNAPSHOT_THRESHOLD); - cluster = (MiniOzoneOMHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) .setClusterId(clusterId) .setScmId(scmId) .setOMServiceId("om-service-test1") diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java index b2c46628287..5a109f7975f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMUpgradeFinalization.java @@ -37,7 +37,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl; +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -67,7 +67,7 @@ public class TestOMUpgradeFinalization { */ @Rule public Timeout timeout = new Timeout(300000); - private MiniOzoneOMHAClusterImpl cluster; + private MiniOzoneHAClusterImpl cluster; private OzoneManager ozoneManager; private ClientProtocol clientProtocol; private int fromLayoutVersion; @@ -100,7 +100,7 @@ public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); String omServiceId = UUID.randomUUID().toString(); - cluster = (MiniOzoneOMHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) .setClusterId(UUID.randomUUID().toString()) .setScmId(UUID.randomUUID().toString()) .setOMServiceId(omServiceId) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java index b8f37ce4ac8..938a75634c2 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java @@ -45,7 +45,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.util.StringUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; @@ -68,7 +68,7 @@ import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; +import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; @@ -693,7 +693,7 @@ private void assertKeyRenamedEx(OzoneBucket bucket, String keyName) private void createTestKey(OzoneBucket bucket, String keyName, String keyValue) throws IOException { OzoneOutputStream out = bucket.createKey(keyName, - keyValue.getBytes(StandardCharsets.UTF_8).length, STAND_ALONE, + keyValue.getBytes(StandardCharsets.UTF_8).length, RATIS, ONE, new HashMap<>()); out.write(keyValue.getBytes(StandardCharsets.UTF_8)); out.close(); @@ -704,7 +704,7 @@ private void createTestKey(OzoneBucket bucket, String keyName, private OmDirectoryInfo getDirInfo(String parentKey) throws Exception { OMMetadataManager omMetadataManager = cluster.getOzoneManager().getMetadataManager(); - long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName, + long bucketId = OMRequestTestUtils.getBucketId(volumeName, bucketName, omMetadataManager); String[] pathComponents = StringUtils.split(parentKey, '/'); long parentId = bucketId; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java index 402745516fd..beaf10c71da 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.debug.DBScanner; import org.apache.hadoop.ozone.debug.RDBParser; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -70,7 +70,7 @@ public void setup() throws Exception { @After public void shutdown() throws Exception { - if (dbStore!=null){ + if (dbStore != null) { dbStore.close(); } } @@ -78,7 +78,7 @@ public void shutdown() throws Exception { @Test public void testOMDB() throws Exception { File newFolder = folder.newFolder(); - if(!newFolder.exists()) { + if (!newFolder.exists()) { Assert.assertTrue(newFolder.mkdirs()); } // Dummy om.db with only keyTable @@ -88,11 +88,11 @@ public void testOMDB() throws Exception { .addTable("keyTable") .build(); // insert 5 keys - for (int i = 0; i<5; i++) { - OmKeyInfo value = TestOMRequestUtils.createOmKeyInfo("sampleVol", - "sampleBuck", "key" + (i+1), 
HddsProtos.ReplicationType.STAND_ALONE, + for (int i = 0; i < 5; i++) { + OmKeyInfo value = OMRequestTestUtils.createOmKeyInfo("sampleVol", + "sampleBuck", "key" + (i + 1), HddsProtos.ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor.ONE); - String key = "key"+ (i); + String key = "key" + (i); Table keyTable = dbStore.getTable("keyTable"); byte[] arr = value.getProtobuf(CURRENT_VERSION).toByteArray(); keyTable.put(key.getBytes(UTF_8), arr); @@ -111,7 +111,7 @@ public void testOMDB() throws Exception { try { getKeyNames(dbScanner); Assert.fail("IllegalArgumentException is expected"); - }catch (IllegalArgumentException e){ + } catch (IllegalArgumentException e) { //ignore } @@ -177,7 +177,7 @@ private List getKeyNames(DBScanner scanner) scanner.setTableName("keyTable"); scanner.call(); Assert.assertFalse(scanner.getScannedObjects().isEmpty()); - for (Object o : scanner.getScannedObjects()){ + for (Object o : scanner.getScannedObjects()) { OmKeyInfo keyInfo = (OmKeyInfo)o; keyNames.add(keyInfo.getKeyName()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java index 941097324a6..9b9d6d8e0ba 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java @@ -521,7 +521,7 @@ private void doKeyOps(OmKeyArgs keyArgs) { } try { - long id = (keySession != null)?keySession.getId():0; + long id = (keySession != null) ? keySession.getId() : 0; writeClient.commitKey(keyArgs, id); } catch (IOException ignored) { } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java index 79197bba728..f77386198e4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmStartupSlvLessThanMlv.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; import org.apache.hadoop.ozone.upgrade.LayoutFeature; -import org.apache.hadoop.ozone.upgrade.TestUpgradeUtils; +import org.apache.hadoop.ozone.upgrade.UpgradeTestUtils; import org.apache.ozone.test.GenericTestUtils; @@ -60,7 +60,7 @@ public void testStartupSlvLessThanMlv() throws Exception { int mlv = largestSlv + 1; // Create version file with MLV > SLV, which should fail the cluster build. - TestUpgradeUtils.createVersionFile(omSubdir, HddsProtos.NodeType.OM, mlv); + UpgradeTestUtils.createVersionFile(omSubdir, HddsProtos.NodeType.OM, mlv); MiniOzoneCluster.Builder clusterBuilder = MiniOzoneCluster.newBuilder(conf) .setClusterId(UUID.randomUUID().toString()) @@ -70,7 +70,7 @@ public void testStartupSlvLessThanMlv() throws Exception { try { clusterBuilder.build(); Assert.fail("Expected OMException due to incorrect MLV on OM creation."); - } catch(OMException e) { + } catch (OMException e) { String expectedMessage = String.format("Cannot initialize " + "VersionManager. 
Metadata layout version (%s) > software layout" + " version (%s)", mlv, largestSlv); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java index 36a17a83e70..389217a3e2f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java @@ -240,7 +240,7 @@ public void testThreeNodeOMservice() throws Exception { OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; break; case omNode2Id : - expectedPeerAddress = "0.0.0.0:"+ + expectedPeerAddress = "0.0.0.0:" + OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; break; case omNode3Id : @@ -323,7 +323,7 @@ public void testOMHAWithUnresolvedAddresses() throws Exception { OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; break; case omNode2Id : - expectedPeerAddress = "0.0.0.0:"+ + expectedPeerAddress = "0.0.0.0:" + OMConfigKeys.OZONE_OM_RATIS_PORT_DEFAULT; break; case omNode3Id : diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index 6118a5ecaf4..26523171fde 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl; +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -69,7 +69,7 @@ */ public abstract class TestOzoneManagerHA { - private static MiniOzoneOMHAClusterImpl cluster = null; + private static MiniOzoneHAClusterImpl cluster = null; private static MiniOzoneCluster.Builder clusterBuilder = null; private static ObjectStore objectStore; private static OzoneConfiguration conf; @@ -91,7 +91,7 @@ public abstract class TestOzoneManagerHA { @Rule public Timeout timeout = Timeout.seconds(300); - public MiniOzoneOMHAClusterImpl getCluster() { + public MiniOzoneHAClusterImpl getCluster() { return cluster; } @@ -180,7 +180,7 @@ public static void init() throws Exception { .setOmId(omId) .setNumOfOzoneManagers(numOfOMs); - cluster = (MiniOzoneOMHAClusterImpl) clusterBuilder.build(); + cluster = (MiniOzoneHAClusterImpl) clusterBuilder.build(); cluster.waitForClusterToBeReady(); objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf) .getObjectStore(); @@ -217,7 +217,7 @@ public static String createKey(OzoneBucket ozoneBucket) throws IOException { String keyName = "key" + RandomStringUtils.randomNumeric(5); String data = "data" + RandomStringUtils.randomNumeric(5); OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, - data.length(), ReplicationType.STAND_ALONE, + data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); ozoneOutputStream.close(); @@ -380,7 +380,7 @@ protected void createKeyTest(boolean checkSuccess) throws Exception { String value = "random data"; 
OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, - value.length(), ReplicationType.STAND_ALONE, + value.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); ozoneOutputStream.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java index 8c085fb2660..fc4fa36eaf3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAMetadataOnly.java @@ -62,7 +62,7 @@ import java.util.Iterator; import java.util.UUID; -import static org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl.NODE_FAILURE_TIMEOUT; +import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl.NODE_FAILURE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_DEFAULT; import static org.apache.ratis.metrics.RatisMetrics.RATIS_APPLICATION_NAME_METRICS; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java index 06a3a974c9d..3af7f011b07 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithACL.java @@ -184,7 +184,7 @@ public void testSetKeyAcl() throws Exception { public void testAddPrefixAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; - String prefixName = RandomStringUtils.randomAlphabetic(5) +"/"; + String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, READ, DEFAULT); @@ -201,7 +201,7 @@ public void testAddPrefixAcl() throws Exception { public void testRemovePrefixAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; - String prefixName = RandomStringUtils.randomAlphabetic(5) +"/"; + String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl userAcl = new OzoneAcl(USER, remoteUserName, READ, ACCESS); OzoneAcl userAcl1 = new OzoneAcl(USER, "remote", @@ -237,7 +237,7 @@ public void testRemovePrefixAcl() throws Exception { public void testSetPrefixAcl() throws Exception { OzoneBucket ozoneBucket = setupBucket(); String remoteUserName = "remoteUser"; - String prefixName = RandomStringUtils.randomAlphabetic(5) +"/"; + String prefixName = RandomStringUtils.randomAlphabetic(5) + "/"; OzoneAcl defaultUserAcl = new OzoneAcl(USER, remoteUserName, READ, DEFAULT); @@ -304,7 +304,7 @@ private void testAddAcl(String remoteUserName, OzoneObj ozoneObj, } private void testRemoveAcl(String remoteUserName, OzoneObj ozoneObj, - OzoneAcl userAcl) throws Exception{ + OzoneAcl userAcl) throws Exception { ObjectStore objectStore = getObjectStore(); // As by default create will add some default acls in RpcClient. 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java index a0c014a748d..0c2f526870e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java @@ -43,7 +43,7 @@ import java.util.UUID; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl.NODE_FAILURE_TIMEOUT; +import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl.NODE_FAILURE_TIMEOUT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; @@ -537,7 +537,7 @@ private void validateListParts(OzoneBucket ozoneBucket, String keyName, Assert.assertTrue(partInfoList.size() == partsMap.size()); - for (int i=0; i< partsMap.size(); i++) { + for (int i = 0; i < partsMap.size(); i++) { Assert.assertEquals(partsMap.get(partInfoList.get(i).getPartNumber()), partInfoList.get(i).getPartName()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithFailover.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithFailover.java index ecbb6e80f99..a31ed2eb473 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithFailover.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithFailover.java @@ -21,7 +21,7 @@ import org.junit.Assert; import org.junit.Test; -import static org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl.NODE_FAILURE_TIMEOUT; +import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl.NODE_FAILURE_TIMEOUT; /** * Test Ozone Manager operation in distributed handler scenario with failover. 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java index fe4429eecba..003f970b4e3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java @@ -373,7 +373,7 @@ private void writeTestData(String volumeName, byte[] data = ContainerTestHelper.getFixedLengthString( keyString, 100).getBytes(UTF_8); OzoneOutputStream keyStream = TestHelper.createKey( - keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + keyName, ReplicationType.RATIS, ReplicationFactor.ONE, 100, store, volumeName, bucketName); keyStream.write(data); keyStream.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java index 9b0d8bcc940..9de622ca0d2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java @@ -88,7 +88,7 @@ public void testGetServiceList() throws Exception { ObjectMapper objectMapper = new ObjectMapper(); TypeReference> serviceInfoReference = - new TypeReference>() {}; + new TypeReference>() { }; List serviceInfos = objectMapper.readValue( serviceListJson, serviceInfoReference); Map serviceMap = new HashMap<>(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java index b432826660c..391f6619b1b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java @@ -31,7 +31,7 @@ import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.ozone.security.acl.OzoneObjInfo; import org.apache.hadoop.security.UserGroupInformation; @@ -255,7 +255,7 @@ private void startCluster() throws Exception { // Note: OM doesn't support live config reloading conf.setBoolean(OZONE_ACL_ENABLED, true); - TestOMRequestUtils.configureFSOptimizedPaths(conf, true); + OMRequestTestUtils.configureFSOptimizedPaths(conf, true); cluster = MiniOzoneCluster.newBuilder(conf).setClusterId(clusterId) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java index 0c5431e11ea..63a4b403832 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java @@ -24,7 +24,7 @@ import 
org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.junit.Assert; @@ -58,7 +58,7 @@ public void testRequestWithNonExistentBucket() String volumeName = "vol1"; String bucketName = "invalidBuck"; - OzoneManagerProtocolProtos.OMRequest omRequest = TestOMRequestUtils + OzoneManagerProtocolProtos.OMRequest omRequest = OMRequestTestUtils .createCompleteMPURequest(volumeName, bucketName, "mpuKey", "mpuKeyID", new ArrayList<>()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java index c35b45bef03..4db0e8ef8a3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl; +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneVolume; @@ -47,7 +47,7 @@ */ public class TestOzoneManagerSnapshotProvider { - private MiniOzoneOMHAClusterImpl cluster = null; + private MiniOzoneHAClusterImpl cluster = null; private ObjectStore objectStore; private OzoneConfiguration conf; private String clusterId; @@ -69,7 +69,7 @@ public void init() throws Exception { omServiceId = "om-service-test1"; conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true); conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - cluster = (MiniOzoneOMHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) .setClusterId(clusterId) .setScmId(scmId) .setOMServiceId(omServiceId) @@ -140,4 +140,4 @@ private long getDownloadedSnapshotIndex(DBCheckpoint dbCheckpoint) return trxnInfoFromCheckpoint.getTransactionIndex(); } -} \ No newline at end of file +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java new file mode 100644 index 00000000000..2937770b735 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java @@ -0,0 +1,70 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.recon; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; + +/** + * Test Recon SCM HA Snapshot Download implementation. + */ +public class TestReconScmHASnapshot { + + /** + * Set a timeout for each test. + */ + @Rule + public Timeout timeout = Timeout.seconds(100); + private OzoneConfiguration conf; + private MiniOzoneCluster ozoneCluster = null; + + @Before + public void setup() throws Exception { + conf = new OzoneConfiguration(); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); + conf.setBoolean( + ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED, true); + conf.setInt(ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD, 0); + conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 5); + ozoneCluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(4) + .includeRecon(true) + .build(); + ozoneCluster.waitForClusterToBeReady(); + } + + @Test + public void testScmHASnapshot() throws Exception { + TestReconScmSnapshot.testSnapshot(ozoneCluster); + } + + @After + public void shutdown() throws Exception { + if (ozoneCluster != null) { + ozoneCluster.shutdown(); + } + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java new file mode 100644 index 00000000000..db5c3275954 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmSnapshot.java @@ -0,0 +1,133 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.recon; + +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.ContainerManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.recon.scm.ReconNodeManager; +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.apache.ozone.test.GenericTestUtils; +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; +import org.slf4j.LoggerFactory; + +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +/** + * Test Recon SCM Snapshot Download implementation. + */ +public class TestReconScmSnapshot { + /** + * Set a timeout for each test. + */ + @Rule + public Timeout timeout = Timeout.seconds(100); + private OzoneConfiguration conf; + private MiniOzoneCluster ozoneCluster = null; + + @Before + public void setup() throws Exception { + conf = new OzoneConfiguration(); + conf.setBoolean( + ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED, true); + conf.setInt(ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD, 0); + ozoneCluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(4) + .includeRecon(true) + .build(); + ozoneCluster.waitForClusterToBeReady(); + } + + @Test + public void testScmSnapshot() throws Exception { + testSnapshot(ozoneCluster); + } + + public static void testSnapshot(MiniOzoneCluster cluster) throws Exception { + GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + .captureLogs(LoggerFactory.getLogger( + ReconStorageContainerManagerFacade.class)); + + List reconContainers = cluster.getReconServer() + .getReconStorageContainerManager().getContainerManager() + .getContainers(); + assertEquals(0, reconContainers.size()); + + ReconNodeManager nodeManager; + nodeManager = (ReconNodeManager) cluster.getReconServer() + .getReconStorageContainerManager().getScmNodeManager(); + long keyCountBefore = nodeManager.getNodeDBKeyCount(); + + //Stopping Recon to add Containers in SCM + cluster.stopRecon(); + + ContainerManager containerManager; + containerManager = cluster.getStorageContainerManager() + .getContainerManager(); + + for (int i = 0; i < 10; i++) { + containerManager.allocateContainer(new RatisReplicationConfig( + HddsProtos.ReplicationFactor.ONE), "testOwner"); + } + + cluster.startRecon(); + + //ContainerCount after Recon DB is updated with SCM DB + containerManager = cluster.getStorageContainerManager() + .getContainerManager(); + ContainerManager reconContainerManager = cluster.getReconServer() + .getReconStorageContainerManager().getContainerManager(); + assertTrue(logCapturer.getOutput() + .contains("Recon Container Count: " + reconContainers.size() + + ", SCM Container Count: " + containerManager.getContainers().size())); + assertEquals(containerManager.getContainers().size(), + 
reconContainerManager.getContainers().size()); + + //PipelineCount after Recon DB is updated with SCM DB + PipelineManager scmPipelineManager = cluster.getStorageContainerManager() + .getPipelineManager(); + PipelineManager reconPipelineManager = cluster.getReconServer() + .getReconStorageContainerManager().getPipelineManager(); + assertEquals(scmPipelineManager.getPipelines().size(), + reconPipelineManager.getPipelines().size()); + + //NodeCount after Recon DB updated with SCM DB + nodeManager = (ReconNodeManager) cluster.getReconServer() + .getReconStorageContainerManager().getScmNodeManager(); + long keyCountAfter = nodeManager.getNodeDBKeyCount(); + assertEquals(keyCountAfter, keyCountBefore); + } + + @After + public void shutdown() throws Exception { + if (ozoneCluster != null) { + ozoneCluster.shutdown(); + } + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java index 56233bd91ca..9e21311936d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; @@ -350,8 +350,8 @@ private LinkedTreeMap getContainerResponseMap(String containerResponse, * For test purpose each container will have only one key. 
*/ private void addKeys(int start, int end) throws Exception { - for(int i = start; i < end; i++) { - Pipeline pipeline = TestUtils.getRandomPipeline(); + for (int i = start; i < end; i++) { + Pipeline pipeline = HddsTestUtils.getRandomPipeline(); List omKeyLocationInfoList = new ArrayList<>(); BlockID blockID = new BlockID(i, 1); OmKeyLocationInfo omKeyLocationInfo1 = getOmKeyLocationInfo(blockID, @@ -359,7 +359,7 @@ private void addKeys(int start, int end) throws Exception { omKeyLocationInfoList.add(omKeyLocationInfo1); OmKeyLocationInfoGroup omKeyLocationInfoGroup = new OmKeyLocationInfoGroup(0, omKeyLocationInfoList); - writeDataToOm("key"+i, "bucket"+i, "vol"+i, + writeDataToOm("key" + i, "bucket" + i, "vol" + i, Collections.singletonList(omKeyLocationInfoGroup)); } } @@ -367,7 +367,7 @@ private void addKeys(int start, int end) throws Exception { private long getTableKeyCount(TableIterator> iterator) { long keyCount = 0; - while(iterator.hasNext()) { + while (iterator.hasNext()) { keyCount++; iterator.next(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java index f3dc4b66ca5..e2d59dfb894 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerFSO.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.ObjectStore; @@ -73,8 +74,7 @@ public static void init() throws Exception { .includeRecon(true) .build(); cluster.waitForClusterToBeReady(); - - cluster.getStorageContainerManager().exitSafeMode(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); store = cluster.getClient().getObjectStore(); } @@ -94,7 +94,7 @@ private void writeTestData(String volumeName, byte[] data = ContainerTestHelper.getFixedLengthString( keyString, 100).getBytes(UTF_8); OzoneOutputStream keyStream = TestHelper.createKey( - keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, + keyName, ReplicationType.RATIS, ReplicationFactor.ONE, 100, store, volumeName, bucketName); keyStream.write(data); keyStream.close(); @@ -133,7 +133,7 @@ public void testNamespaceSummaryAPI() throws Exception { Assert.assertEquals(0, entity.getNumTotalDir()); for (int i = 0; i < 10; i++) { Assert.assertNotNull(impl.getOMMetadataManagerInstance() - .getVolumeTable().get("/vol"+ i)); + .getVolumeTable().get("/vol" + i)); } addKeys(10, 12, "dir"); impl.syncDataFromOM(); @@ -141,7 +141,7 @@ public void testNamespaceSummaryAPI() throws Exception { // test Recon is sync'ed with OM. for (int i = 10; i < 12; i++) { Assert.assertNotNull(impl.getOMMetadataManagerInstance() - .getVolumeTable().getSkipCache("/vol"+ i)); + .getVolumeTable().getSkipCache("/vol" + i)); } // test root response @@ -161,8 +161,8 @@ public void testNamespaceSummaryAPI() throws Exception { * For test purpose each container will have only one key. 
*/ private void addKeys(int start, int end, String dirPrefix) throws Exception { - for(int i = start; i < end; i++) { - writeKeys("vol"+i, "bucket"+i, dirPrefix + i + "/key"+i); + for (int i = start; i < end; i++) { + writeKeys("vol" + i, "bucket" + i, dirPrefix + i + "/key" + i); } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java index 14ec9ff0cd1..81caef9c5ed 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.recon; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; import java.util.HashMap; import java.util.UUID; @@ -27,11 +27,12 @@ import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneOMHAClusterImpl; +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; @@ -55,7 +56,7 @@ public class TestReconWithOzoneManagerHA { @Rule public Timeout timeout = Timeout.seconds(300);; - private MiniOzoneOMHAClusterImpl cluster; + private MiniOzoneHAClusterImpl cluster; private ObjectStore objectStore; private static final String OM_SERVICE_ID = "omService1"; private static final String VOL_NAME = "testrecon"; @@ -64,13 +65,14 @@ public class TestReconWithOzoneManagerHA { public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, Boolean.TRUE.toString()); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); // Sync to disk enabled RocksDBConfiguration dbConf = conf.getObject(RocksDBConfiguration.class); dbConf.setSyncOption(true); conf.setFromObject(dbConf); - cluster = (MiniOzoneOMHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) + cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newOMHABuilder(conf) .setClusterId(UUID.randomUUID().toString()) .setScmId(UUID.randomUUID().toString()) .setOMServiceId(OM_SERVICE_ID) @@ -114,7 +116,7 @@ public void testReconGetsSnapshotFromLeader() throws Exception { String expectedUrl = "http://" + (hostname.equals("0.0.0.0") ? 
"localhost" : hostname) + ":" + ozoneManager.get().getHttpServer().getHttpAddress().getPort() + - OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; + OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; String snapshotUrl = impl.getOzoneManagerSnapshotUrl(); Assert.assertEquals("OM Snapshot should be requested from the leader.", expectedUrl, snapshotUrl); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java index e480d513d05..65860551b3f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java @@ -64,7 +64,7 @@ public static void init() throws Exception { @AfterClass public static void shutdown() throws InterruptedException { - if(cluster != null) { + if (cluster != null) { cluster.shutdown(); } IOUtils.cleanupWithLogger(null, storageContainerLocationClient); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java index 94b998447c4..2e67a88347a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java @@ -102,8 +102,8 @@ public void cleanup() { public void testReplicasAreReportedForClosedContainerAfterRestart() throws Exception { // Create some keys to write data into the open containers - for (int i=0; i<10; i++) { - TestDataUtil.createKey(bucket, "key"+i, ReplicationFactor.THREE, + for (int i = 0; i < 10; i++) { + TestDataUtil.createKey(bucket, "key" + i, ReplicationFactor.THREE, ReplicationType.RATIS, "this is the content"); } StorageContainerManager scm = cluster.getStorageContainerManager(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java index 10ab52858ee..f0c3b1df101 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestFailoverWithSCMHA.java @@ -45,7 +45,7 @@ import java.util.Map; import java.util.UUID; -import static org.apache.hadoop.hdds.scm.TestUtils.getContainer; +import static org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java index e9a7cf33744..1932b66fa4d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMInstallSnapshotWithHA.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Iterator; import java.util.List; import java.util.UUID; @@ -120,7 +121,7 @@ public void testInstallSnapshot() throws Exception { StorageContainerManager leaderSCM = 
getLeader(cluster); Assert.assertNotNull(leaderSCM); // Find the inactive SCM - String followerId = getInactiveSCM(cluster).getScmId(); + String followerId = getInactiveSCM(cluster).getSCMNodeId(); StorageContainerManager followerSCM = cluster.getSCM(followerId); // Do some transactions so that the log index increases @@ -159,7 +160,7 @@ public void testInstallSnapshot() throws Exception { public void testInstallOldCheckpointFailure() throws Exception { // Get the leader SCM StorageContainerManager leaderSCM = getLeader(cluster); - String followerId = getInactiveSCM(cluster).getScmId(); + String followerId = getInactiveSCM(cluster).getSCMNodeId(); // Find the inactive SCM StorageContainerManager followerSCM = cluster.getSCM(followerId); @@ -213,7 +214,7 @@ public void testInstallOldCheckpointFailure() throws Exception { public void testInstallCorruptedCheckpointFailure() throws Exception { StorageContainerManager leaderSCM = getLeader(cluster); // Find the inactive SCM - String followerId = getInactiveSCM(cluster).getScmId(); + String followerId = getInactiveSCM(cluster).getSCMNodeId(); StorageContainerManager followerSCM = cluster.getSCM(followerId); // Do some transactions so that the log index increases writeToIncreaseLogIndex(leaderSCM, 100); @@ -316,13 +317,10 @@ static StorageContainerManager getLeader(MiniOzoneHAClusterImpl impl) { return null; } - static StorageContainerManager getInactiveSCM(MiniOzoneHAClusterImpl impl) { - for (StorageContainerManager scm : impl.getStorageContainerManagers()) { - if (!impl.isSCMActive(scm.getScmId())) { - return scm; - } - } - return null; + private static StorageContainerManager getInactiveSCM( + MiniOzoneHAClusterImpl cluster) { + Iterator inactiveScms = cluster.getInactiveSCM(); + return inactiveScms.hasNext() ? 
inactiveScms.next() : null; } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java index 8ba2d529fc5..9e00867a1f0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java @@ -151,14 +151,14 @@ public void testSCMContainerStateCount() throws Exception { ContainerManager scmContainerManager = scm.getContainerManager(); List containerInfoList = new ArrayList<>(); - for (int i=0; i < 10; i++) { + for (int i = 0; i < 10; i++) { containerInfoList.add( scmContainerManager.allocateContainer(new StandaloneReplicationConfig( ReplicationFactor.ONE), UUID.randomUUID().toString())); } long containerID; - for (int i=0; i < 10; i++) { + for (int i = 0; i < 10; i++) { if (i % 2 == 0) { containerID = containerInfoList.get(i).getContainerID(); scmContainerManager.updateContainerState( @@ -184,7 +184,7 @@ public void testSCMContainerStateCount() throws Exception { containerStateCount = scm.getContainerStateCount(); containerStateCount.forEach((k, v) -> { - if(k.equals(HddsProtos.LifeCycleState.CLOSING.toString())) { + if (k.equals(HddsProtos.LifeCycleState.CLOSING.toString())) { assertEquals((int)v, 5); } else if (k.equals(HddsProtos.LifeCycleState.CLOSED.toString())) { assertEquals((int)v, 5); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java index a5d55051d7a..f09a150cc7c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestStorageContainerManagerHA.java @@ -58,7 +58,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; -import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; +import static org.apache.hadoop.hdds.client.ReplicationType.RATIS; /** * Base class for Ozone Manager HA tests. 
@@ -155,7 +155,7 @@ public void testPutKey() throws Exception { String keyName = UUID.randomUUID().toString(); OzoneOutputStream out = bucket - .createKey(keyName, value.getBytes(UTF_8).length, STAND_ALONE, ONE, + .createKey(keyName, value.getBytes(UTF_8).length, RATIS, ONE, new HashMap<>()); out.write(value.getBytes(UTF_8)); out.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java index a3bd2953f3b..8b7e0b443ee 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientGrpc.java @@ -61,7 +61,7 @@ public void setup() { dns.add(MockDatanodeDetails.randomDatanodeDetails()); dnsInOrder = new ArrayList<>(); - for (int i=2; i>=0; i--) { + for (int i = 2; i >= 0; i--) { dnsInOrder.add(dns.get(i)); } @@ -82,14 +82,14 @@ public void testCorrectDnsReturnedFromPipeline() throws IOException { Assert.assertNotEquals(dns.get(0), dnsInOrder.get(0)); } - @Test(timeout=5000) + @Test(timeout = 5000) public void testRandomFirstNodeIsCommandTarget() throws IOException { final ArrayList allDNs = new ArrayList<>(dns); // Using a new Xceiver Client, call it repeatedly until all DNs in the // pipeline have been the target of the command, indicating it is shuffling // the DNs on each call with a new client. This test will timeout if this // is not happening. - while(allDNs.size() > 0) { + while (allDNs.size() > 0) { XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) { @Override public XceiverClientReply sendCommandAsync( @@ -112,7 +112,7 @@ public void testFirstNodeIsCorrectWithTopologyForCommandTarget() // With a new Client, make 100 calls and ensure the first sortedDN is used // each time. The logic should always use the sorted node, so we can check // only a single DN is ever seen after 100 calls. - for (int i=0; i<100; i++) { + for (int i = 0; i < 100; i++) { XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) { @Override public XceiverClientReply sendCommandAsync( @@ -131,7 +131,7 @@ public XceiverClientReply sendCommandAsync( public void testConnectionReusedAfterGetBlock() throws IOException { // With a new Client, make 100 calls. On each call, ensure that only one // DN is seen, indicating the same DN connection is reused. 
- for (int i=0; i<100; i++) { + for (int i = 0; i < 100; i++) { final Set seenDNs = new HashSet<>(); XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf) { @Override diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java index 1552ef609fc..65d754126a9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java @@ -75,10 +75,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADMIN_MONITOR_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*; import static org.junit.Assert.fail; /** @@ -111,6 +108,7 @@ public static void init() { conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, interval, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1, SECONDS); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, false); conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1); conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 1, SECONDS); conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 1, SECONDS); @@ -360,14 +358,14 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance() Set replicas = getContainerReplicas(container); List forMaintenance = new ArrayList<>(); - replicas.forEach(r ->forMaintenance.add(r.getDatanodeDetails())); + replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails())); scmClient.startMaintenanceNodes(forMaintenance.stream() .map(d -> getDNHostAndPort(d)) .collect(Collectors.toList()), 0); // Ensure all 3 DNs go to maintenance - for(DatanodeDetails dn : forMaintenance) { + for (DatanodeDetails dn : forMaintenance) { waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE); } @@ -381,7 +379,7 @@ public void testContainerIsReplicatedWhenAllNodesGotoMaintenance() .collect(Collectors.toList())); // Ensure all 3 DNs go to maintenance - for(DatanodeDetails dn : forMaintenance) { + for (DatanodeDetails dn : forMaintenance) { waitForDnToReachOpState(dn, IN_SERVICE); } @@ -402,26 +400,26 @@ public void testEnteringMaintenanceNodeCompletesAfterSCMRestart() Set replicas = getContainerReplicas(container); List forMaintenance = new ArrayList<>(); - replicas.forEach(r ->forMaintenance.add(r.getDatanodeDetails())); + replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails())); scmClient.startMaintenanceNodes(forMaintenance.stream() .map(d -> getDNHostAndPort(d)) .collect(Collectors.toList()), 0); // Ensure all 3 DNs go to entering_maintenance - for(DatanodeDetails dn : forMaintenance) { + for (DatanodeDetails dn : forMaintenance) { waitForDnToReachPersistedOpState(dn, ENTERING_MAINTENANCE); } cluster.restartStorageContainerManager(true); setManagers(); List newDns = new ArrayList<>(); - for(DatanodeDetails dn : 
forMaintenance) { + for (DatanodeDetails dn : forMaintenance) { newDns.add(nm.getNodeByUuid(dn.getUuid().toString())); } // Ensure all 3 DNs go to maintenance - for(DatanodeDetails dn : newDns) { + for (DatanodeDetails dn : newDns) { waitForDnToReachOpState(dn, IN_MAINTENANCE); } @@ -552,7 +550,7 @@ private void setManagers() { */ private void generateData(int keyCount, String keyPrefix, ReplicationFactor repFactor, ReplicationType repType) throws IOException { - for (int i=0; i= 0); + assert (numOfArgs >= 0); String[] res = new String[1 + 1 + numOfOMs + numOfArgs]; final int indexOmServiceIds = 0; final int indexOmNodes = 1; @@ -280,7 +280,7 @@ private String[] getHASetConfStrings(int numOfArgs) { String[] omNodesArr = omNodesVal.split(","); // Sanity check - assert(omNodesArr.length == numOfOMs); + assert (omNodesArr.length == numOfOMs); for (int i = 0; i < numOfOMs; i++) { res[indexOmAddressStart + i] = getSetConfStringFromConf(ConfUtils.addKeySuffixes( @@ -624,7 +624,7 @@ public void testDeleteTrashNoSkipTrash() throws Exception { // create volume: vol1 with bucket: bucket1 final String testVolBucket = "/vol1/bucket1"; - final String testKey = testVolBucket+"/key1"; + final String testKey = testVolBucket + "/key1"; final String[] volBucketArgs = new String[] {"-mkdir", "-p", testVolBucket}; final String[] keyArgs = new String[] {"-touch", testKey}; @@ -652,7 +652,7 @@ public void testDeleteTrashNoSkipTrash() throws Exception { final String[] rmKeyArgs = new String[] {"-rm", "-R", testKey}; final String[] rmTrashArgs = new String[] {"-rm", "-R", - testVolBucket+"/.Trash"}; + testVolBucket + "/.Trash"}; final Path trashPathKey1 = Path.mergePaths(new Path( new OFSPath(testKey).getTrashRoot(), new Path("Current")), new Path(testKey)); @@ -666,11 +666,11 @@ public void testDeleteTrashNoSkipTrash() throws Exception { Assert.assertEquals(0, res); LOG.info("Executing testDeleteTrashNoSkipTrash: key1 deleted moved to" - +" Trash: "+trashPathKey1.toString()); + + " Trash: " + trashPathKey1.toString()); fs.getFileStatus(trashPathKey1); LOG.info("Executing testDeleteTrashNoSkipTrash: deleting trash FsShell " - +"with args{}: ", Arrays.asList(rmTrashArgs)); + + "with args{}: ", Arrays.asList(rmTrashArgs)); res = ToolRunner.run(shell, rmTrashArgs); Assert.assertEquals(0, res); diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index a6c95c2e21f..694d7df6b30 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -623,6 +623,7 @@ message BucketArgs { repeated hadoop.hdds.KeyValue metadata = 7; optional uint64 quotaInBytes = 8; optional uint64 quotaInNamespace = 9; + optional string ownerName = 10; } message PrefixInfo { @@ -730,7 +731,7 @@ message SetBucketPropertyRequest { } message SetBucketPropertyResponse { - + optional bool response = 1; } message DeleteBucketRequest { @@ -1098,6 +1099,7 @@ message ServiceListRequest { message DBUpdatesRequest { required uint64 sequenceNumber = 1; + optional uint64 limitCount = 2; } message ServiceListResponse { diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java index 1786c2a8836..23cc4662f5e 100644 --- 
a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -50,7 +50,7 @@ public class TestOmKeyInfoCodec { private OmKeyInfo getKeyInfo(int chunkNum) { List omKeyLocationInfoList = new ArrayList<>(); - Pipeline pipeline = TestUtils.getRandomPipeline(); + Pipeline pipeline = HddsTestUtils.getRandomPipeline(); for (int i = 0; i < chunkNum; i++) { BlockID blockID = new BlockID(i, i); OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java index d356552352b..6dae8f1e4df 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -55,7 +55,7 @@ public class TestRepeatedOmKeyInfoCodec { private OmKeyInfo getKeyInfo(int chunkNum) { List omKeyLocationInfoList = new ArrayList<>(); - Pipeline pipeline = TestUtils.getRandomPipeline(); + Pipeline pipeline = HddsTestUtils.getRandomPipeline(); for (int i = 0; i < chunkNum; i++) { BlockID blockID = new BlockID(i, i); OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestInstanceHelper.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestInstanceHelper.java deleted file mode 100644 index 3758122d238..00000000000 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestInstanceHelper.java +++ /dev/null @@ -1,68 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.om.helpers; - -import com.google.protobuf.ByteString; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos; - - -/** - * Test TestInstanceHelper. - * - * Utility methods to create test instances of protobuf related classes - */ -public final class TestInstanceHelper { - - private TestInstanceHelper(){ - super(); - } - - public static OzoneManagerStorageProtos.OzoneAclInfo buildTestOzoneAclInfo( - String aclString){ - OzoneAcl oacl = OzoneAcl.parseAcl(aclString); - ByteString rights = ByteString.copyFrom(oacl.getAclBitSet().toByteArray()); - return OzoneManagerStorageProtos.OzoneAclInfo.newBuilder() - .setType(OzoneManagerStorageProtos.OzoneAclInfo.OzoneAclType.USER) - .setName(oacl.getName()) - .setRights(rights) - .setAclScope(OzoneManagerStorageProtos. - OzoneAclInfo.OzoneAclScope.ACCESS) - .build(); - } - - public static HddsProtos.KeyValue getDefaultTestMetadata( - String key, String value) { - return HddsProtos.KeyValue.newBuilder() - .setKey(key) - .setValue(value) - .build(); - } - - public static OzoneManagerStorageProtos.PersistedPrefixInfo - getDefaultTestPrefixInfo(String name, String aclString, - HddsProtos.KeyValue metadata) { - return OzoneManagerStorageProtos.PersistedPrefixInfo.newBuilder() - .setName(name) - .addAcls(buildTestOzoneAclInfo(aclString)) - .addMetadata(metadata) - .build(); - } -} diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java index 8f00736030a..a28c6d13b57 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.helpers; +import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos; @@ -36,7 +37,38 @@ */ public class TestOmPrefixInfo { - public OmPrefixInfo getOmPrefixInfoForTest(String path, + private static OzoneManagerStorageProtos.OzoneAclInfo buildTestOzoneAclInfo( + String aclString) { + OzoneAcl oacl = OzoneAcl.parseAcl(aclString); + ByteString rights = ByteString.copyFrom(oacl.getAclBitSet().toByteArray()); + return OzoneManagerStorageProtos.OzoneAclInfo.newBuilder() + .setType(OzoneManagerStorageProtos.OzoneAclInfo.OzoneAclType.USER) + .setName(oacl.getName()) + .setRights(rights) + .setAclScope(OzoneManagerStorageProtos. 
+ OzoneAclInfo.OzoneAclScope.ACCESS) + .build(); + } + + private static HddsProtos.KeyValue getDefaultTestMetadata( + String key, String value) { + return HddsProtos.KeyValue.newBuilder() + .setKey(key) + .setValue(value) + .build(); + } + + private static OzoneManagerStorageProtos.PersistedPrefixInfo + getDefaultTestPrefixInfo(String name, String aclString, + HddsProtos.KeyValue metadata) { + return OzoneManagerStorageProtos.PersistedPrefixInfo.newBuilder() + .setName(name) + .addAcls(buildTestOzoneAclInfo(aclString)) + .addMetadata(metadata) + .build(); + } + + private OmPrefixInfo getOmPrefixInfoForTest(String path, IAccessAuthorizer.ACLIdentityType identityType, String identityString, IAccessAuthorizer.ACLType aclType, @@ -77,10 +109,9 @@ public void testgetFromProtobufOneMetadataOneAcl() { String aclString = "user:myuser:rw"; String metakey = "metakey"; String metaval = "metaval"; - HddsProtos.KeyValue metadata = TestInstanceHelper - .getDefaultTestMetadata(metakey, metaval); + HddsProtos.KeyValue metadata = getDefaultTestMetadata(metakey, metaval); OzoneManagerStorageProtos.PersistedPrefixInfo prefixInfo = - TestInstanceHelper.getDefaultTestPrefixInfo(prefixInfoPath, + getDefaultTestPrefixInfo(prefixInfoPath, aclString, metadata); OmPrefixInfo ompri = OmPrefixInfo.getFromProtobuf(prefixInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java index 058adf33d13..b818a221fb6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/BucketManagerImpl.java @@ -621,7 +621,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) } return hasAccess; } catch (IOException ex) { - if(ex instanceof OMException) { + if (ex instanceof OMException) { throw (OMException) ex; } LOG.error("CheckAccess operation failed for bucket:{}/{}.", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index b569b5dec60..f073dce2400 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -17,19 +17,12 @@ package org.apache.hadoop.ozone.om; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.fs.OzoneManagerFS; import org.apache.hadoop.hdds.utils.BackgroundService; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; @@ 
-55,44 +48,6 @@ public interface KeyManager extends OzoneManagerFS, IOzoneAcl { */ void stop() throws IOException; - /** - * After calling commit, the key will be made visible. There can be multiple - * open key writes in parallel (identified by client id). The most recently - * committed one will be the one visible. - * - * @param args the key to commit. - * @param clientID the client that is committing. - * @throws IOException - */ - void commitKey(OmKeyArgs args, long clientID) throws IOException; - - /** - * A client calls this on an open key, to request to allocate a new block, - * and appended to the tail of current block list of the open client. - * - * @param args the key to append - * @param clientID the client requesting block. - * @param excludeList List of datanodes/containers to exclude during block - * allocation. - * @return the reference to the new block. - * @throws IOException - */ - OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, - ExcludeList excludeList) throws IOException; - - /** - * Given the args of a key to put, write an open key entry to meta data. - * - * In case that the container creation or key write failed on - * DistributedStorageHandler, this key's metadata will still stay in OM. - * TODO garbage collect the open keys that never get closed - * - * @param args the args of the key provided by client. - * @return a OpenKeySession instance client uses to talk to container. - * @throws IOException - */ - OpenKeySession openKey(OmKeyArgs args) throws IOException; - /** * Look up an existing key. Return the info of the key to client side, which * DistributedStorageHandler will use to access the data on datanode. @@ -105,26 +60,6 @@ OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, */ OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) throws IOException; - /** - * Renames an existing key within a bucket. - * - * @param args the args of the key provided by client. - * @param toKeyName New name to be used for the key - * @throws IOException if specified key doesn't exist or - * some other I/O errors while renaming the key. - */ - void renameKey(OmKeyArgs args, String toKeyName) throws IOException; - - /** - * Deletes an object by an object key. The key will be immediately removed - * from OM namespace and become invisible to clients. The object data - * will be removed in async manner that might retain for some time. - * - * @param args the args of the key provided by client. - * @throws IOException if specified key doesn't exist or - * some other I/O errors while deleting an object. - */ - void deleteKey(OmKeyArgs args) throws IOException; /** * Returns a list of keys represented by {@link OmKeyInfo} @@ -192,16 +127,6 @@ List listTrash(String volumeName, String bucketName, */ List getExpiredOpenKeys(int count) throws IOException; - /** - * Deletes a expired open key by its name. Called when a hanging key has been - * lingering for too long. Once called, the open key entries gets removed - * from OM mdata data. - * - * @param objectKeyName object key name with #open# prefix. - * @throws IOException if specified key doesn't exist or other I/O errors. - */ - void deleteExpiredOpenKey(String objectKeyName) throws IOException; - /** * Returns the metadataManager. * @return OMMetadataManager. @@ -215,42 +140,6 @@ List listTrash(String volumeName, String bucketName, BackgroundService getDeletingService(); - /** - * Initiate multipart upload for the specified key. 
- * @param keyArgs - * @return MultipartInfo - * @throws IOException - */ - OmMultipartInfo initiateMultipartUpload(OmKeyArgs keyArgs) throws IOException; - - /** - * Commit Multipart upload part file. - * @param omKeyArgs - * @param clientID - * @return OmMultipartCommitUploadPartInfo - * @throws IOException - */ - - OmMultipartCommitUploadPartInfo commitMultipartUploadPart( - OmKeyArgs omKeyArgs, long clientID) throws IOException; - - /** - * Complete Multipart upload Request. - * @param omKeyArgs - * @param multipartUploadList - * @return OmMultipartUploadCompleteInfo - * @throws IOException - */ - OmMultipartUploadCompleteInfo completeMultipartUpload(OmKeyArgs omKeyArgs, - OmMultipartUploadCompleteList multipartUploadList) throws IOException; - - /** - * Abort multipart upload request. - * @param omKeyArgs - * @throws IOException - */ - void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException; - OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws OMException; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index d64607eefc9..639893af849 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -37,7 +37,6 @@ import java.util.Stack; import java.util.TreeMap; import java.util.TreeSet; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; @@ -45,24 +44,17 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import org.apache.hadoop.fs.FileEncryptionInfo; -import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.RatisReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.hdds.utils.UniqueId; -import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.CodecRegistry; -import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; @@ -71,7 +63,6 @@ import org.apache.hadoop.ipc.Server; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; @@ -82,17 +73,11 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import 
org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; -import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartUpload; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo; -import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadList; import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.helpers.OmPartInfo; -import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; @@ -112,14 +97,12 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Strings; -import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.READ; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.BlockTokenSecretProto.AccessModeProto.WRITE; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; @@ -132,7 +115,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_KEY_PREALLOCATION_BLOCKS_MAX_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE_DEFAULT; -import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; @@ -312,92 +294,6 @@ private OmBucketInfo validateS3Bucket(String volumeName, String bucketName) } return omBucketInfo; } - - @Override - public OmKeyLocationInfo allocateBlock(OmKeyArgs args, long clientID, - ExcludeList excludeList) throws IOException { - Preconditions.checkNotNull(args); - - - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - OMFileRequest.validateBucket(metadataManager, volumeName, bucketName); - String openKey = metadataManager.getOpenKey( - volumeName, bucketName, keyName, clientID); - - OmKeyInfo keyInfo = - metadataManager.getOpenKeyTable(getBucketLayout()).get(openKey); - if (keyInfo == null) { - LOG.error("Allocate block for a key not in open status in meta store" + - " /{}/{}/{} with ID {}", volumeName, bucketName, keyName, clientID); - throw new OMException("Open Key not found", - KEY_NOT_FOUND); - } - - // current version not committed, so new 
blocks coming now are added to - // the same version - List locationInfos = - allocateBlock(keyInfo, excludeList, scmBlockSize); - - keyInfo.appendNewBlocks(locationInfos, true); - keyInfo.updateModifcationTime(); - metadataManager.getOpenKeyTable(getBucketLayout()).put(openKey, keyInfo); - - return locationInfos.get(0); - - } - - /** - * This methods avoids multiple rpc calls to SCM by allocating multiple blocks - * in one rpc call. - * @param keyInfo - key info for key to be allocated. - * @param requestedSize requested length for allocation. - * @param excludeList exclude list while allocating blocks. - * @param requestedSize requested size to be allocated. - * @return - * @throws IOException - */ - private List allocateBlock(OmKeyInfo keyInfo, - ExcludeList excludeList, long requestedSize) throws IOException { - int numBlocks = Math.min((int) ((requestedSize - 1) / scmBlockSize + 1), - preallocateBlocksMax); - List locationInfos = new ArrayList<>(numBlocks); - String remoteUser = getRemoteUser().getShortUserName(); - List allocatedBlocks; - try { - allocatedBlocks = scmClient.getBlockClient() - .allocateBlock( - scmBlockSize, - numBlocks, - keyInfo.getReplicationConfig(), - omId, - excludeList); - - } catch (SCMException ex) { - if (ex.getResult() - .equals(SCMException.ResultCodes.SAFE_MODE_EXCEPTION)) { - throw new OMException(ex.getMessage(), ResultCodes.SCM_IN_SAFE_MODE); - } - throw ex; - } - for (AllocatedBlock allocatedBlock : allocatedBlocks) { - BlockID blockID = new BlockID(allocatedBlock.getBlockID()); - OmKeyLocationInfo.Builder builder = new OmKeyLocationInfo.Builder() - .setBlockID(blockID) - .setLength(scmBlockSize) - .setOffset(0) - .setPipeline(allocatedBlock.getPipeline()); - if (grpcBlockTokenEnabled) { - builder.setToken(secretManager - .generateToken(remoteUser, blockID, - EnumSet.of(READ, WRITE), scmBlockSize)); - } - locationInfos.add(builder.build()); - } - return locationInfos; - } - /* Optimize ugi lookup for RPC operations to avoid a trip through * UGI.getCurrentUser which is synch'ed. */ @@ -428,220 +324,6 @@ public EncryptedKeyVersion run() throws IOException { Preconditions.checkNotNull(edek); return edek; } - - @Override - public OpenKeySession openKey(OmKeyArgs args) throws IOException { - Preconditions.checkNotNull(args); - Preconditions.checkNotNull(args.getAcls(), "Default acls " + - "should be set."); - - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - OMFileRequest.validateBucket(metadataManager, volumeName, bucketName); - - long currentTime = UniqueId.next(); - OmKeyInfo keyInfo; - long openVersion; - // NOTE size of a key is not a hard limit on anything, it is a value that - // client should expect, in terms of current size of key. If client sets - // a value, then this value is used, otherwise, we allocate a single - // block which is the current size, if read by the client. - final long size = args.getDataSize() > 0 ? 
- args.getDataSize() : scmBlockSize; - final List locations = new ArrayList<>(); - - String dbKeyName = metadataManager.getOzoneKey( - args.getVolumeName(), args.getBucketName(), args.getKeyName()); - - FileEncryptionInfo encInfo; - metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - OmBucketInfo bucketInfo; - try { - bucketInfo = getBucketInfo(volumeName, bucketName); - encInfo = getFileEncryptionInfo(bucketInfo); - keyInfo = prepareKeyInfo(args, dbKeyName, size, locations, encInfo); - } catch (OMException e) { - throw e; - } catch (IOException ex) { - LOG.error("Key open failed for volume:{} bucket:{} key:{}", - volumeName, bucketName, keyName, ex); - throw new OMException(ex.getMessage(), ResultCodes.KEY_ALLOCATION_ERROR); - } finally { - metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - if (keyInfo == null) { - // the key does not exist, create a new object, the new blocks are the - // version 0 - keyInfo = createKeyInfo(args, locations, args.getReplicationConfig(), - size, encInfo, bucketInfo); - } - openVersion = keyInfo.getLatestVersionLocations().getVersion(); - LOG.debug("Key {} allocated in volume {} bucket {}", - keyName, volumeName, bucketName); - allocateBlockInKey(keyInfo, size, currentTime); - return new OpenKeySession(currentTime, keyInfo, openVersion); - } - - private void allocateBlockInKey(OmKeyInfo keyInfo, long size, long sessionId) - throws IOException { - String openKey = metadataManager - .getOpenKey(keyInfo.getVolumeName(), keyInfo.getBucketName(), - keyInfo.getKeyName(), sessionId); - // requested size is not required but more like a optimization: - // SCM looks at the requested, if it 0, no block will be allocated at - // the point, if client needs more blocks, client can always call - // allocateBlock. But if requested size is not 0, OM will preallocate - // some blocks and piggyback to client, to save RPC calls. 
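// Preallocation here is only an optimization: allocateBlock() above rounds the
// requested size up to whole SCM blocks and caps the count, roughly
// min((size - 1) / scmBlockSize + 1, preallocateBlocksMax). Assuming the default
// 256 MB block size, a 600 MB request would preallocate min(3, preallocateBlocksMax)
// blocks; anything beyond that is fetched later through separate allocateBlock calls.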
- if (size > 0) { - List locationInfos = - allocateBlock(keyInfo, new ExcludeList(), size); - keyInfo.appendNewBlocks(locationInfos, true); - } - - metadataManager.getOpenKeyTable(getBucketLayout()).put(openKey, keyInfo); - - } - - private OmKeyInfo prepareKeyInfo( - OmKeyArgs keyArgs, String dbKeyName, long size, - List locations, FileEncryptionInfo encInfo) - throws IOException { - OmKeyInfo keyInfo = null; - if (keyArgs.getIsMultipartKey()) { - keyInfo = prepareMultipartKeyInfo(keyArgs, size, locations, encInfo); - } else if (metadataManager.getKeyTable( - getBucketLayout(metadataManager, keyArgs.getVolumeName(), - keyArgs.getBucketName())).isExist(dbKeyName)) { - keyInfo = metadataManager.getKeyTable( - getBucketLayout(metadataManager, keyArgs.getVolumeName(), - keyArgs.getBucketName())).get(dbKeyName); - // the key already exist, the new blocks will be added as new version - // when locations.size = 0, the new version will have identical blocks - // as its previous version - keyInfo.addNewVersion(locations, true, true); - keyInfo.setDataSize(size + keyInfo.getDataSize()); - } - if(keyInfo != null) { - keyInfo.setMetadata(keyArgs.getMetadata()); - } - return keyInfo; - } - - private OmKeyInfo prepareMultipartKeyInfo(OmKeyArgs args, long size, - List locations, FileEncryptionInfo encInfo) - throws IOException { - - Preconditions.checkArgument(args.getMultipartUploadPartNumber() > 0, - "PartNumber Should be greater than zero"); - // When key is multipart upload part key, we should take replication - // type and replication factor from original key which has done - // initiate multipart upload. If we have not found any such, we throw - // error no such multipart upload. - String uploadID = args.getMultipartUploadID(); - Preconditions.checkNotNull(uploadID); - String multipartKey = metadataManager - .getMultipartKey(args.getVolumeName(), args.getBucketName(), - args.getKeyName(), uploadID); - OmKeyInfo partKeyInfo = - metadataManager.getOpenKeyTable(getBucketLayout()).get(multipartKey); - if (partKeyInfo == null) { - throw new OMException("No such Multipart upload is with specified " + - "uploadId " + uploadID, - ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } - - // For this upload part we don't need to check in KeyTable. As this - // is not an actual key, it is a part of the key. - return createKeyInfo(args, locations, - partKeyInfo.getReplicationConfig(), size, encInfo, - getBucketInfo(args.getVolumeName(), args.getBucketName())); - } - - /** - * Create OmKeyInfo object. 
- * @param keyArgs - * @param locations - * @param replicationConfig - * @param size - * @param encInfo - * @param omBucketInfo - * @return - */ - private OmKeyInfo createKeyInfo(OmKeyArgs keyArgs, - List locations, - ReplicationConfig replicationConfig, long size, - FileEncryptionInfo encInfo, - OmBucketInfo omBucketInfo) { - OmKeyInfo.Builder builder = new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, locations))) - .setCreationTime(Time.now()) - .setModificationTime(Time.now()) - .setDataSize(size) - .setReplicationConfig(replicationConfig) - .setFileEncryptionInfo(encInfo) - .addAllMetadata(keyArgs.getMetadata()); - builder.setAcls(getAclsForKey(keyArgs, omBucketInfo)); - - if(Boolean.valueOf(omBucketInfo.getMetadata().get(OzoneConsts.GDPR_FLAG))) { - builder.addMetadata(OzoneConsts.GDPR_FLAG, Boolean.TRUE.toString()); - } - return builder.build(); - } - - @Override - public void commitKey(OmKeyArgs args, long clientID) throws IOException { - Preconditions.checkNotNull(args); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - List locationInfoList = args.getLocationInfoList(); - String objectKey = metadataManager - .getOzoneKey(volumeName, bucketName, keyName); - String openKey = metadataManager - .getOpenKey(volumeName, bucketName, keyName, clientID); - Preconditions.checkNotNull(locationInfoList); - try { - metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - OMFileRequest.validateBucket(metadataManager, volumeName, bucketName); - OmKeyInfo keyInfo = - metadataManager.getOpenKeyTable(getBucketLayout()).get(openKey); - if (keyInfo == null) { - throw new OMException("Failed to commit key, as " + openKey + "entry " + - "is not found in the openKey table", KEY_NOT_FOUND); - } - keyInfo.setDataSize(args.getDataSize()); - keyInfo.setModificationTime(Time.now()); - - //update the block length for each block - keyInfo.updateLocationInfoList(locationInfoList, false); - metadataManager.getStore().move( - openKey, - objectKey, - keyInfo, - metadataManager.getOpenKeyTable(getBucketLayout()), metadataManager - .getKeyTable( - getBucketLayout(metadataManager, volumeName, bucketName))); - } catch (OMException e) { - throw e; - } catch (IOException ex) { - LOG.error("Key commit failed for volume:{} bucket:{} key:{}", - volumeName, bucketName, keyName, ex); - throw new OMException(ex.getMessage(), - ResultCodes.KEY_ALLOCATION_ERROR); - } finally { - metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - @Override public OmKeyInfo lookupKey(OmKeyArgs args, String clientAddress) throws IOException { @@ -835,135 +517,6 @@ protected Map refreshPipeline( } } - @Override - public void renameKey(OmKeyArgs args, String toKeyName) throws IOException { - Preconditions.checkNotNull(args); - Preconditions.checkNotNull(toKeyName); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String fromKeyName = args.getKeyName(); - if (toKeyName.length() == 0 || fromKeyName.length() == 0) { - LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}", - volumeName, bucketName, fromKeyName, toKeyName); - throw new OMException("Key name is empty", - ResultCodes.INVALID_KEY_NAME); - } - - metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, 
volumeName, - bucketName); - try { - // fromKeyName should exist - String fromKey = metadataManager.getOzoneKey( - volumeName, bucketName, fromKeyName); - OmKeyInfo fromKeyValue = metadataManager - .getKeyTable(getBucketLayout(metadataManager, volumeName, bucketName)) - .get(fromKey); - if (fromKeyValue == null) { - // TODO: Add support for renaming open key - LOG.error( - "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. " - + "Key: {} not found.", volumeName, bucketName, fromKeyName, - toKeyName, fromKeyName); - throw new OMException("Key not found", - KEY_NOT_FOUND); - } - - // A rename is a no-op if the target and source name is same. - // TODO: Discuss if we need to throw?. - if (fromKeyName.equals(toKeyName)) { - return; - } - - // toKeyName should not exist - String toKey = - metadataManager.getOzoneKey(volumeName, bucketName, toKeyName); - OmKeyInfo toKeyValue = metadataManager - .getKeyTable(getBucketLayout(metadataManager, volumeName, bucketName)) - .get(toKey); - if (toKeyValue != null) { - LOG.error( - "Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}. " - + "Key: {} already exists.", volumeName, bucketName, - fromKeyName, toKeyName, toKeyName); - throw new OMException("Key already exists", - OMException.ResultCodes.KEY_ALREADY_EXISTS); - } - - fromKeyValue.setKeyName(toKeyName); - fromKeyValue.updateModifcationTime(); - DBStore store = metadataManager.getStore(); - try (BatchOperation batch = store.initBatchOperation()) { - metadataManager.getKeyTable( - getBucketLayout(metadataManager, volumeName, bucketName)) - .deleteWithBatch(batch, fromKey); - metadataManager.getKeyTable( - getBucketLayout(metadataManager, volumeName, bucketName)) - .putWithBatch(batch, toKey, fromKeyValue); - store.commitBatchOperation(batch); - } - } catch (IOException ex) { - if (ex instanceof OMException) { - throw ex; - } - LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} toKey:{}", - volumeName, bucketName, fromKeyName, toKeyName, ex); - throw new OMException(ex.getMessage(), - ResultCodes.KEY_RENAME_ERROR); - } finally { - metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - @Override - public void deleteKey(OmKeyArgs args) throws IOException { - Preconditions.checkNotNull(args); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - try { - String objectKey = metadataManager.getOzoneKey( - volumeName, bucketName, keyName); - OmKeyInfo keyInfo = metadataManager - .getKeyTable(getBucketLayout(metadataManager, volumeName, bucketName)) - .get(objectKey); - if (keyInfo == null) { - throw new OMException("Key not found", - KEY_NOT_FOUND); - } else { - // directly delete key with no blocks from db. This key need not be - // moved to deleted table. 
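// A key counts as empty when none of its location versions contains a block,
// so it can be dropped from the key table directly; there is no block data for
// the deleting service to reclaim. Non-empty keys are instead staged in the
// deleted table below and their blocks are cleaned up asynchronously.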
- if (isKeyEmpty(keyInfo)) { - metadataManager.getKeyTable( - getBucketLayout(metadataManager, volumeName, bucketName)) - .delete(objectKey); - LOG.debug("Key {} deleted from OM DB", keyName); - return; - } - } - RepeatedOmKeyInfo repeatedOmKeyInfo = - metadataManager.getDeletedTable().get(objectKey); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete(keyInfo, - repeatedOmKeyInfo, 0L, false); - metadataManager - .getKeyTable(getBucketLayout(metadataManager, volumeName, bucketName)) - .delete(objectKey); - metadataManager.getDeletedTable().put(objectKey, repeatedOmKeyInfo); - } catch (OMException ex) { - throw ex; - } catch (IOException ex) { - LOG.error(String.format("Delete key failed for volume:%s " - + "bucket:%s key:%s", volumeName, bucketName, keyName), ex); - throw new OMException(ex.getMessage(), ex, - ResultCodes.KEY_DELETION_ERROR); - } finally { - metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - private boolean isKeyEmpty(OmKeyInfo keyInfo) { for (OmKeyLocationInfoGroup keyLocationList : keyInfo .getKeyLocationVersions()) { @@ -1028,12 +581,6 @@ public List getExpiredOpenKeys(int count) throws IOException { return metadataManager.getExpiredOpenKeys(count); } - @Override - public void deleteExpiredOpenKey(String objectKeyName) throws IOException { - Preconditions.checkNotNull(objectKeyName); - // TODO: Fix this in later patches. - } - @Override public OMMetadataManager getMetadataManager() { return metadataManager; @@ -1049,355 +596,6 @@ public BackgroundService getDirDeletingService() { return dirDeletingService; } - @Override - public OmMultipartInfo initiateMultipartUpload(OmKeyArgs omKeyArgs) throws - IOException { - Preconditions.checkNotNull(omKeyArgs); - String uploadID = UUID.randomUUID().toString() + "-" + UniqueId.next(); - return createMultipartInfo(omKeyArgs, uploadID); - } - - private OmMultipartInfo createMultipartInfo(OmKeyArgs keyArgs, - String multipartUploadID) throws IOException { - String volumeName = keyArgs.getVolumeName(); - String bucketName = keyArgs.getBucketName(); - String keyName = keyArgs.getKeyName(); - - metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - OmBucketInfo bucketInfo = validateS3Bucket(volumeName, bucketName); - try { - - // We are adding uploadId to key, because if multiple users try to - // perform multipart upload on the same key, each will try to upload, who - // ever finally commit the key, we see that key in ozone. Suppose if we - // don't add id, and use the same key /volume/bucket/key, when multiple - // users try to upload the key, we update the parts of the key's from - // multiple users to same key, and the key output can be a mix of the - // parts from multiple users. - - // So on same key if multiple time multipart upload is initiated we - // store multiple entries in the openKey Table. - // Checked AWS S3, when we try to run multipart upload, each time a - // new uploadId is returned. - - String multipartKey = metadataManager.getMultipartKey(volumeName, - bucketName, keyName, multipartUploadID); - - // Not checking if there is an already key for this in the keyTable, as - // during final complete multipart upload we take care of this. 
- - long currentTime = Time.now(); - Map partKeyInfoMap = new HashMap<>(); - OmMultipartKeyInfo multipartKeyInfo = new OmMultipartKeyInfo.Builder() - .setUploadID(multipartUploadID) - .setCreationTime(currentTime) - .setReplicationConfig(keyArgs.getReplicationConfig()) - .setPartKeyInfoList(partKeyInfoMap) - .build(); - Map> locations = new HashMap<>(); - OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() - .setVolumeName(keyArgs.getVolumeName()) - .setBucketName(keyArgs.getBucketName()) - .setKeyName(keyArgs.getKeyName()) - .setCreationTime(currentTime) - .setModificationTime(currentTime) - .setReplicationConfig(keyArgs.getReplicationConfig()) - .setOmKeyLocationInfos(Collections.singletonList( - new OmKeyLocationInfoGroup(0, locations))) - .setAcls(getAclsForKey(keyArgs, bucketInfo)) - .build(); - DBStore store = metadataManager.getStore(); - try (BatchOperation batch = store.initBatchOperation()) { - // Create an entry in open key table and multipart info table for - // this key. - metadataManager.getMultipartInfoTable().putWithBatch(batch, - multipartKey, multipartKeyInfo); - metadataManager.getOpenKeyTable(getBucketLayout()).putWithBatch(batch, - multipartKey, omKeyInfo); - store.commitBatchOperation(batch); - return new OmMultipartInfo(volumeName, bucketName, keyName, - multipartUploadID); - } - } catch (IOException ex) { - LOG.error("Initiate Multipart upload Failed for volume:{} bucket:{} " + - "key:{}", volumeName, bucketName, keyName, ex); - throw new OMException(ex.getMessage(), - ResultCodes.INITIATE_MULTIPART_UPLOAD_ERROR); - } finally { - metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - private List getAclsForKey(OmKeyArgs keyArgs, - OmBucketInfo bucketInfo) { - List acls = new ArrayList<>(); - - if(keyArgs.getAcls() != null) { - acls.addAll(keyArgs.getAcls()); - } - - // Inherit DEFAULT acls from prefix. - if(prefixManager != null) { - List prefixList = prefixManager.getLongestPrefixPath( - OZONE_URI_DELIMITER + - keyArgs.getVolumeName() + OZONE_URI_DELIMITER + - keyArgs.getBucketName() + OZONE_URI_DELIMITER + - keyArgs.getKeyName()); - - if (!prefixList.isEmpty()) { - // Add all acls from direct parent to key. - OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); - if(prefixInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls())) { - return acls; - } - } - } - } - - // Inherit DEFAULT acls from bucket only if DEFAULT acls for - // prefix are not set. 
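// ACL resolution order for a new key: ACLs supplied with the request come first,
// then DEFAULT ACLs inherited from the longest matching prefix; only when the
// prefix contributes no DEFAULT ACLs does the bucket's DEFAULT ACL set apply here.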
- if (bucketInfo != null) { - if (OzoneAclUtil.inheritDefaultAcls(acls, bucketInfo.getAcls())) { - return acls; - } - } - - // TODO: do we need to further fallback to volume default ACL - return acls; - } - - @Override - public OmMultipartCommitUploadPartInfo commitMultipartUploadPart( - OmKeyArgs omKeyArgs, long clientID) throws IOException { - Preconditions.checkNotNull(omKeyArgs); - String volumeName = omKeyArgs.getVolumeName(); - String bucketName = omKeyArgs.getBucketName(); - String keyName = omKeyArgs.getKeyName(); - String uploadID = omKeyArgs.getMultipartUploadID(); - int partNumber = omKeyArgs.getMultipartUploadPartNumber(); - - metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - validateS3Bucket(volumeName, bucketName); - String partName; - try { - String multipartKey = metadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); - OmMultipartKeyInfo multipartKeyInfo = metadataManager - .getMultipartInfoTable().get(multipartKey); - - String openKey = metadataManager.getOpenKey( - volumeName, bucketName, keyName, clientID); - OmKeyInfo keyInfo = - metadataManager.getOpenKeyTable(getBucketLayout()).get(openKey); - - // set the data size and location info list - keyInfo.setDataSize(omKeyArgs.getDataSize()); - keyInfo.updateLocationInfoList(omKeyArgs.getLocationInfoList(), true); - - partName = metadataManager.getOzoneKey(volumeName, bucketName, keyName) - + clientID; - if (multipartKeyInfo == null) { - // This can occur when user started uploading part by the time commit - // of that part happens, in between the user might have requested - // abort multipart upload. If we just throw exception, then the data - // will not be garbage collected, so move this part to delete table - // and throw error - // Move this part to delete table. - RepeatedOmKeyInfo repeatedOmKeyInfo = - metadataManager.getDeletedTable().get(partName); - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - keyInfo, repeatedOmKeyInfo, 0L, false); - metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo); - throw new OMException("No such Multipart upload is with specified " + - "uploadId " + uploadID, ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - PartKeyInfo oldPartKeyInfo = - multipartKeyInfo.getPartKeyInfo(partNumber); - PartKeyInfo.Builder partKeyInfo = PartKeyInfo.newBuilder(); - partKeyInfo.setPartName(partName); - partKeyInfo.setPartNumber(partNumber); - // TODO remove unused write code path - partKeyInfo.setPartKeyInfo(keyInfo.getProtobuf(CURRENT_VERSION)); - multipartKeyInfo.addPartKeyInfo(partNumber, partKeyInfo.build()); - if (oldPartKeyInfo == null) { - // This is the first time part is being added. - DBStore store = metadataManager.getStore(); - try (BatchOperation batch = store.initBatchOperation()) { - metadataManager.getOpenKeyTable(getBucketLayout()) - .deleteWithBatch(batch, openKey); - metadataManager.getMultipartInfoTable().putWithBatch(batch, - multipartKey, multipartKeyInfo); - store.commitBatchOperation(batch); - } - } else { - // If we have this part already, that means we are overriding it. - // We need to 3 steps. - // Add the old entry to delete table. - // Remove the new entry from openKey table. - // Add the new entry in to the list of part keys. 
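// Overwrite of an existing part: the superseded part's key info is queued in the
// deleted table so its blocks can be reclaimed in the background, the temporary
// open-key entry is removed, and the refreshed part list is written back to the
// multipart info table before the batch is committed.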
- DBStore store = metadataManager.getStore(); - try (BatchOperation batch = store.initBatchOperation()) { - OmKeyInfo partKey = OmKeyInfo.getFromProtobuf( - oldPartKeyInfo.getPartKeyInfo()); - - RepeatedOmKeyInfo repeatedOmKeyInfo = - metadataManager.getDeletedTable() - .get(oldPartKeyInfo.getPartName()); - - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - partKey, repeatedOmKeyInfo, 0L, false); - - metadataManager.getDeletedTable().put(partName, repeatedOmKeyInfo); - metadataManager.getDeletedTable().putWithBatch(batch, - oldPartKeyInfo.getPartName(), - repeatedOmKeyInfo); - metadataManager.getOpenKeyTable(getBucketLayout()) - .deleteWithBatch(batch, openKey); - metadataManager.getMultipartInfoTable().putWithBatch(batch, - multipartKey, multipartKeyInfo); - store.commitBatchOperation(batch); - } - } - } - } catch (IOException ex) { - LOG.error("Upload part Failed: volume:{} bucket:{} " + - "key:{} PartNumber: {}", volumeName, bucketName, keyName, - partNumber, ex); - throw new OMException(ex.getMessage(), - ResultCodes.MULTIPART_UPLOAD_PARTFILE_ERROR); - } finally { - metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - - return new OmMultipartCommitUploadPartInfo(partName); - - } - - @Override - @SuppressWarnings("methodlength") - public OmMultipartUploadCompleteInfo completeMultipartUpload( - OmKeyArgs omKeyArgs, OmMultipartUploadCompleteList multipartUploadList) - throws IOException { - Preconditions.checkNotNull(omKeyArgs); - Preconditions.checkNotNull(multipartUploadList); - String volumeName = omKeyArgs.getVolumeName(); - String bucketName = omKeyArgs.getBucketName(); - String keyName = omKeyArgs.getKeyName(); - String uploadID = omKeyArgs.getMultipartUploadID(); - metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - validateS3Bucket(volumeName, bucketName); - try { - String multipartKey = metadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); - - OmMultipartKeyInfo multipartKeyInfo = metadataManager - .getMultipartInfoTable().get(multipartKey); - if (multipartKeyInfo == null) { - throw new OMException("Complete Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } - //TODO: Actual logic has been removed from this, and the old code has a - // bug. New code for this is in S3MultipartUploadCompleteRequest. - // This code will be cleaned up as part of HDDS-2353. 
- - return new OmMultipartUploadCompleteInfo(omKeyArgs.getVolumeName(), - omKeyArgs.getBucketName(), omKeyArgs.getKeyName(), DigestUtils - .sha256Hex(keyName)); - } catch (OMException ex) { - throw ex; - } catch (IOException ex) { - LOG.error("Complete Multipart Upload Failed: volume: " + volumeName + - "bucket: " + bucketName + "key: " + keyName, ex); - throw new OMException(ex.getMessage(), ResultCodes - .COMPLETE_MULTIPART_UPLOAD_ERROR); - } finally { - metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - - @Override - public void abortMultipartUpload(OmKeyArgs omKeyArgs) throws IOException { - - Preconditions.checkNotNull(omKeyArgs); - String volumeName = omKeyArgs.getVolumeName(); - String bucketName = omKeyArgs.getBucketName(); - String keyName = omKeyArgs.getKeyName(); - String uploadID = omKeyArgs.getMultipartUploadID(); - Preconditions.checkNotNull(uploadID, "uploadID cannot be null"); - validateS3Bucket(volumeName, bucketName); - metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - OmBucketInfo bucketInfo; - try { - String multipartKey = metadataManager.getMultipartKey(volumeName, - bucketName, keyName, uploadID); - OmMultipartKeyInfo multipartKeyInfo = metadataManager - .getMultipartInfoTable().get(multipartKey); - OmKeyInfo openKeyInfo = - metadataManager.getOpenKeyTable(getBucketLayout()).get(multipartKey); - - // If there is no entry in openKeyTable, then there is no multipart - // upload initiated for this key. - if (openKeyInfo == null) { - LOG.error("Abort Multipart Upload Failed: volume: {} bucket: {} " - + "key: {} with error no such uploadID: {}", volumeName, - bucketName, keyName, uploadID); - throw new OMException("Abort Multipart Upload Failed: volume: " + - volumeName + "bucket: " + bucketName + "key: " + keyName, - ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR); - } else { - // Move all the parts to delete table - TreeMap partKeyInfoMap = multipartKeyInfo - .getPartKeyInfoMap(); - DBStore store = metadataManager.getStore(); - try (BatchOperation batch = store.initBatchOperation()) { - for (Map.Entry partKeyInfoEntry : partKeyInfoMap - .entrySet()) { - PartKeyInfo partKeyInfo = partKeyInfoEntry.getValue(); - OmKeyInfo currentKeyPartInfo = OmKeyInfo.getFromProtobuf( - partKeyInfo.getPartKeyInfo()); - - RepeatedOmKeyInfo repeatedOmKeyInfo = - metadataManager.getDeletedTable() - .get(partKeyInfo.getPartName()); - - repeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - currentKeyPartInfo, repeatedOmKeyInfo, 0L, false); - - metadataManager.getDeletedTable().putWithBatch(batch, - partKeyInfo.getPartName(), repeatedOmKeyInfo); - } - // Finally delete the entry from the multipart info table and open - // key table - metadataManager.getMultipartInfoTable().deleteWithBatch(batch, - multipartKey); - metadataManager.getOpenKeyTable(getBucketLayout()) - .deleteWithBatch(batch, multipartKey); - store.commitBatchOperation(batch); - } - } - } catch (OMException ex) { - throw ex; - } catch (IOException ex) { - LOG.error("Abort Multipart Upload Failed: volume: " + volumeName + - "bucket: " + bucketName + "key: " + keyName, ex); - throw new OMException(ex.getMessage(), ResultCodes - .ABORT_MULTIPART_UPLOAD_FAILED); - } finally { - metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - - } - @Override public OmMultipartUploadList listMultipartUploads(String volumeName, String bucketName, String prefix) throws OMException { @@ -1551,7 +749,7 @@ public OmMultipartUploadListParts 
listParts(String volumeName, } } catch (OMException ex) { throw ex; - } catch (IOException ex){ + } catch (IOException ex) { LOG.error( "List Multipart Upload Parts Failed: volume: {}, bucket: {}, ,key: " + "{} ", @@ -1873,7 +1071,7 @@ public boolean checkAccess(OzoneObj ozObject, RequestContext context) } return hasAccess; } catch (IOException ex) { - if(ex instanceof OMException) { + if (ex instanceof OMException) { throw (OMException) ex; } LOG.error("CheckAccess operation failed for key:{}/{}/{}", volume, @@ -2132,53 +1330,6 @@ private OzoneFileStatus getOzoneFileStatusFSO(String volumeName, FILE_NOT_FOUND); } - /** - * Ozone FS api to create a directory. Parent directories if do not exist - * are created for the input directory. - * - * @param args Key args - * @throws OMException if any entry in the path exists as a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - @Override - public void createDirectory(OmKeyArgs args) throws IOException { - Preconditions.checkNotNull(args, "Key args can not be null"); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - - metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - try { - - // Check if this is the root of the filesystem. - if (keyName.length() == 0) { - return; - } - - Path keyPath = Paths.get(keyName); - OzoneFileStatus status = - verifyNoFilesInPath(volumeName, bucketName, keyPath, false); - if (status != null && status.getTrimmedName() - .equals(keyName)) { - // if directory already exists - return; - } - OmKeyInfo dirDbKeyInfo = - createDirectoryKey(volumeName, bucketName, keyName, args.getAcls()); - String dirDbKey = metadataManager - .getOzoneKey(volumeName, bucketName, dirDbKeyInfo.getKeyName()); - metadataManager - .getKeyTable(getBucketLayout(metadataManager, volumeName, bucketName)) - .put(dirDbKey, dirDbKeyInfo); - } finally { - metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - } - private OmKeyInfo createDirectoryKey(String volumeName, String bucketName, String keyName, List acls) throws IOException { // verify bucket exists @@ -2200,65 +1351,6 @@ private OmKeyInfo createDirectoryKey(String volumeName, String bucketName, .setAcls(acls) .build(); } - - /** - * OzoneFS api to creates an output stream for a file. 
- * - * @param args Key args - * @param isOverWrite if true existing file at the location will be - * overwritten - * @param isRecursive if true file would be created even if parent - * directories do not exist - * @throws OMException if given key is a directory - * if file exists and isOverwrite flag is false - * if an ancestor exists as a file - * if bucket does not exist - * @throws IOException if there is error in the db - * invalid arguments - */ - @Override - public OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite, - boolean isRecursive) throws IOException { - Preconditions.checkNotNull(args, "Key args can not be null"); - String volumeName = args.getVolumeName(); - String bucketName = args.getBucketName(); - String keyName = args.getKeyName(); - OpenKeySession keySession; - - metadataManager.getLock().acquireWriteLock(BUCKET_LOCK, volumeName, - bucketName); - try { - OzoneFileStatus fileStatus; - try { - fileStatus = getFileStatus(args); - if (fileStatus.isDirectory()) { - throw new OMException("Can not write to directory: " + keyName, - ResultCodes.NOT_A_FILE); - } else if (fileStatus.isFile()) { - if (!isOverWrite) { - throw new OMException("File " + keyName + " already exists", - ResultCodes.FILE_ALREADY_EXISTS); - } - } - } catch (OMException ex) { - if (ex.getResult() != FILE_NOT_FOUND) { - throw ex; - } - } - - verifyNoFilesInPath(volumeName, bucketName, - Paths.get(keyName).getParent(), !isRecursive); - // TODO: Optimize call to openKey as keyInfo is already available in the - // filestatus. We can avoid some operations in openKey call. - keySession = openKey(args); - } finally { - metadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, - bucketName); - } - - return keySession; - } - /** * OzoneFS api to lookup for a file. 
* @@ -2671,7 +1763,7 @@ public List listStatusFSO(OmKeyArgs args, boolean recursive, if (fileStatusInfo != null) { prefixKeyInDB = fileStatusInfo.getKeyInfo().getParentObjectID(); - if(fileStatusInfo.isDirectory()){ + if (fileStatusInfo.isDirectory()) { seekDirInDB = metadataManager.getOzonePathKey(prefixKeyInDB, fileStatusInfo.getKeyInfo().getFileName()); @@ -2913,7 +2005,7 @@ private int listStatusFindFilesInTableCache( String cacheKey = entry.getKey().getCacheKey(); OmKeyInfo cacheOmKeyInfo = entry.getValue().getCacheValue(); // cacheOmKeyInfo is null if an entry is deleted in cache - if(cacheOmKeyInfo == null){ + if (cacheOmKeyInfo == null) { deletedKeySet.add(cacheKey); continue; } @@ -2958,7 +2050,7 @@ private int listStatusFindDirsInTableCache( String cacheKey = entry.getKey().getCacheKey(); OmDirectoryInfo cacheOmDirInfo = entry.getValue().getCacheValue(); // cacheOmKeyInfo is null if an entry is deleted in cache - if(cacheOmDirInfo == null){ + if (cacheOmDirInfo == null) { deletedKeySet.add(cacheKey); continue; } @@ -3279,10 +2371,6 @@ public boolean isBucketFSOptimized(String volName, String buckName) return false; } - private BucketLayout getBucketLayout() { - return BucketLayout.DEFAULT; - } - private BucketLayout getBucketLayout(OMMetadataManager omMetadataManager, String volName, String buckName) { if (omMetadataManager == null) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index 69871fdf8f8..827727c2c2f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -31,7 +31,7 @@ * This class is for maintaining Ozone Manager statistics. 
*/ @InterfaceAudience.Private -@Metrics(about="Ozone Manager Metrics", context="dfs") +@Metrics(about = "Ozone Manager Metrics", context = "dfs") public class OMMetrics { private static final String SOURCE_NAME = OMMetrics.class.getSimpleName(); @@ -246,17 +246,17 @@ public void setNumBuckets(long val) { public void setNumKeys(long val) { long oldVal = this.numKeys.value(); - this.numKeys.incr(val- oldVal); + this.numKeys.incr(val - oldVal); } public void setNumDirs(long val) { long oldVal = this.numDirs.value(); - this.numDirs.incr(val- oldVal); + this.numDirs.incr(val - oldVal); } public void setNumFiles(long val) { long oldVal = this.numDirs.value(); - this.numDirs.incr(val- oldVal); + this.numDirs.incr(val - oldVal); } public void decNumKeys(long val) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index d1095ad1b20..c2dcb20f9ec 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -834,13 +834,13 @@ public List listBuckets(final String volumeName, @Override public Iterator, CacheValue>> - getBucketIterator(){ + getBucketIterator() { return bucketTable.cacheIterator(); } @Override public TableIterator> - getKeyIterator(){ + getKeyIterator() { return keyTable.iterator(); } @@ -891,7 +891,6 @@ public List listKeys(String volumeName, String bucketName, TreeMap cacheKeyMap = new TreeMap<>(); - Set deletedKeySet = new TreeSet<>(); Iterator, CacheValue>> iterator = keyTable.cacheIterator(); @@ -911,12 +910,10 @@ public List listKeys(String volumeName, String bucketName, OmKeyInfo omKeyInfo = entry.getValue().getCacheValue(); // Making sure that entry in cache is not for delete key request. - if (omKeyInfo != null) { - if (key.startsWith(seekPrefix) && key.compareTo(seekKey) >= 0) { - cacheKeyMap.put(key, omKeyInfo); - } - } else { - deletedKeySet.add(key); + if (omKeyInfo != null + && key.startsWith(seekPrefix) + && key.compareTo(seekKey) >= 0) { + cacheKeyMap.put(key, omKeyInfo); } } @@ -934,7 +931,9 @@ public List listKeys(String volumeName, String bucketName, // Entry should not be marked for delete, consider only those // entries. - if(!deletedKeySet.contains(kv.getKey())) { + CacheValue cacheValue = + keyTable.getCacheValue(new CacheKey<>(kv.getKey())); + if (cacheValue == null || cacheValue.getCacheValue() != null) { cacheKeyMap.put(kv.getKey(), kv.getValue()); currentCount++; } @@ -965,7 +964,6 @@ public List listKeys(String volumeName, String bucketName, // Clear map and set. cacheKeyMap.clear(); - deletedKeySet.clear(); return result; } @@ -1107,7 +1105,7 @@ public List getPendingDeletionKeys(final int keyCount) if (kv != null) { RepeatedOmKeyInfo infoList = kv.getValue(); // Get block keys as a list. 
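// Each pending entry may carry several deleted OmKeyInfo instances; the loop below
// maps the latest location version of each to plain BlockIDs (container ID plus
// local ID) that the block-deleting service can hand to SCM for deletion.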
- for(OmKeyInfo info : infoList.getOmKeyInfoList()){ + for (OmKeyInfo info : infoList.getOmKeyInfoList()) { OmKeyLocationInfoGroup latest = info.getLatestVersionLocations(); List item = latest.getLocationList().stream() .map(b -> new BlockID(b.getContainerID(), b.getLocalID())) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java index e7834db0df0..7c322581b2a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneAclUtils.java @@ -63,7 +63,7 @@ public static void checkAllAcls(OzoneManager ozoneManager, //OzoneNativeAuthorizer differs from Ranger Authorizer as Ranger requires // only READ access on parent level access. OzoneNativeAuthorizer has // different parent level access based on the child level access type - if(ozoneManager.isNativeAuthorizerEnabled()) { + if (ozoneManager.isNativeAuthorizerEnabled()) { if (aclType == IAccessAuthorizer.ACLType.CREATE || aclType == IAccessAuthorizer.ACLType.DELETE || aclType == IAccessAuthorizer.ACLType.WRITE_ACL) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 60b23819ff4..6297199c27e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -2433,11 +2433,11 @@ public boolean checkAcls(OzoneObj obj, RequestContext context, if (!accessAuthorizer.checkAccess(obj, context)) { if (throwIfPermissionDenied) { - String volumeName = obj.getVolumeName() != null? - "Volume:" + obj.getVolumeName() + " ": ""; - String bucketName = obj.getBucketName() != null? - "Bucket:" + obj.getBucketName() + " ": ""; - String keyName = obj.getKeyName() != null? + String volumeName = obj.getVolumeName() != null ? + "Volume:" + obj.getVolumeName() + " " : ""; + String bucketName = obj.getBucketName() != null ? + "Bucket:" + obj.getBucketName() + " " : ""; + String keyName = obj.getKeyName() != null ? "Key:" + obj.getKeyName() : ""; LOG.warn("User {} doesn't have {} permission to access {} {}{}{}", context.getClientUgi().getUserName(), context.getAclRights(), @@ -3543,8 +3543,12 @@ public boolean isRatisEnabled() { public DBUpdates getDBUpdates( DBUpdatesRequest dbUpdatesRequest) throws SequenceNumberNotFoundException { + long limitCount = Long.MAX_VALUE; + if (dbUpdatesRequest.hasLimitCount()) { + limitCount = dbUpdatesRequest.getLimitCount(); + } DBUpdatesWrapper updatesSince = metadataManager.getStore() - .getUpdatesSince(dbUpdatesRequest.getSequenceNumber()); + .getUpdatesSince(dbUpdatesRequest.getSequenceNumber(), limitCount); DBUpdates dbUpdates = new DBUpdates(updatesSince.getData()); dbUpdates.setCurrentSequenceNumber(updatesSince.getCurrentSequenceNumber()); return dbUpdates; @@ -3761,7 +3765,7 @@ private void addS3GVolumeToDB() throws IOException { // Commit to DB. 
- try(BatchOperation batchOperation = + try (BatchOperation batchOperation = metadataManager.getStore().initBatchOperation()) { metadataManager.getVolumeTable().putWithBatch(batchOperation, dbVolumeKey, omVolumeArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java index f24c00d3d04..4250e0f8fbd 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.server.http.BaseHttpServer; import org.apache.hadoop.ozone.OzoneConsts; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT; /** @@ -36,7 +36,7 @@ public OzoneManagerHttpServer(MutableConfigurationSource conf, super(conf, "ozoneManager"); addServlet("serviceList", OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT, ServiceListJSONServlet.class); - addServlet("dbCheckpoint", OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT, + addServlet("dbCheckpoint", OZONE_DB_CHECKPOINT_HTTP_ENDPOINT, OMDBCheckpointServlet.class); getWebAppContext().setAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE, om); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java index 130ce4dd0d1..2ffb3882f8b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerPrepareState.java @@ -182,7 +182,7 @@ public synchronized void restorePrepareFromFile(long currentIndex) File prepareMarkerFile = getPrepareMarkerFile(); if (prepareMarkerFile.exists()) { byte[] data = new byte[(int) prepareMarkerFile.length()]; - try(FileInputStream stream = new FileInputStream(prepareMarkerFile)) { + try (FileInputStream stream = new FileInputStream(prepareMarkerFile)) { stream.read(data); } catch (IOException e) { throwPrepareException(e, "Failed to read prepare marker " + @@ -255,7 +255,7 @@ private void writePrepareMarkerFile(long index) throws IOException { File parentDir = markerFile.getParentFile(); Files.createDirectories(parentDir.toPath()); - try(FileOutputStream stream = new FileOutputStream(markerFile)) { + try (FileOutputStream stream = new FileOutputStream(markerFile)) { stream.write(Long.toString(index).getBytes(StandardCharsets.UTF_8)); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java index ac2091fa26e..8a22bff1279 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java @@ -178,7 +178,7 @@ private void commonInit() { * to execute its tasks. This allows the dependency to be injected for unit * testing. 
*/ - static class OMStarterHelper implements OMStarterInterface{ + static class OMStarterHelper implements OMStarterInterface { @Override public void start(OzoneConfiguration conf) throws IOException, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java index 57d17cd4339..634968ac298 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3SecretManagerImpl.java @@ -65,7 +65,7 @@ public S3SecretValue getS3Secret(String kerberosID) throws IOException { try { S3SecretValue s3Secret = omMetadataManager.getS3SecretTable().get(kerberosID); - if(s3Secret == null) { + if (s3Secret == null) { byte[] secret = OmUtils.getSHADigest(); result = new S3SecretValue(kerberosID, DigestUtils.sha256Hex(secret)); omMetadataManager.getS3SecretTable().put(kerberosID, result); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java index 92b76913be3..68d38141fb3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java @@ -135,7 +135,7 @@ public FSDataInputStream open(Path path, int i) { public FSDataOutputStream create(Path path, FsPermission fsPermission, boolean b, int i, short i1, - long l, Progressable progressable){ + long l, Progressable progressable) { throw new UnsupportedOperationException( "fs.create() not implemented in TrashOzoneFileSystem"); } @@ -173,12 +173,12 @@ private boolean renameFSO(OFSPath srcPath, OFSPath dstPath) { OzoneManagerProtocolProtos.OMRequest omRequest = getRenameKeyRequest(srcPath, dstPath); try { - if(omRequest != null) { + if (omRequest != null) { submitRequest(omRequest); return true; } return false; - } catch (Exception e){ + } catch (Exception e) { LOG.error("Couldn't send rename request", e); return false; } @@ -203,7 +203,7 @@ private boolean deleteFSO(OFSPath srcPath) { OzoneManagerProtocolProtos.OMRequest omRequest = getDeleteKeyRequest(srcPath); try { - if(omRequest != null) { + if (omRequest != null) { submitRequest(omRequest); return true; } @@ -299,7 +299,7 @@ public Collection getTrashRoots(boolean allUsers) { CacheValue>> bucketIterator = ozoneManager.getMetadataManager().getBucketIterator(); List ret = new ArrayList<>(); - while (bucketIterator.hasNext()){ + while (bucketIterator.hasNext()) { Map.Entry, CacheValue> entry = bucketIterator.next(); OmBucketInfo omBucketInfo = entry.getValue().getCacheValue(); @@ -316,7 +316,7 @@ public Collection getTrashRoots(boolean allUsers) { } } } - } catch (Exception e){ + } catch (Exception e) { LOG.error("Couldn't perform fs operation " + "fs.listStatus()/fs.exists()", e); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java index d8bc27010e4..e1138afc8e3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashPolicyOzone.java @@ -70,7 +70,7 @@ public class TrashPolicyOzone extends TrashPolicyDefault 
{ /** Format of checkpoint directories used prior to Hadoop 0.23. */ private static final DateFormat OLD_CHECKPOINT = new SimpleDateFormat("yyMMddHHmm"); - private static final int MSECS_PER_MINUTE = 60*1000; + private static final int MSECS_PER_MINUTE = 60 * 1000; private long emptierInterval; @@ -78,7 +78,7 @@ public class TrashPolicyOzone extends TrashPolicyDefault { private OzoneManager om; - public TrashPolicyOzone(){ + public TrashPolicyOzone() { } @Override @@ -110,7 +110,7 @@ public void initialize(Configuration conf, FileSystem fs) { } } - TrashPolicyOzone(FileSystem fs, Configuration conf, OzoneManager om){ + TrashPolicyOzone(FileSystem fs, Configuration conf, OzoneManager om) { initialize(conf, fs); this.om = om; } @@ -198,7 +198,7 @@ public void run() { // sleep for interval Thread.sleep(end - now); // if not leader, thread will always be sleeping - if (!om.isLeaderReady()){ + if (!om.isLeaderReady()) { continue; } } catch (InterruptedException e) { @@ -219,7 +219,7 @@ public void run() { continue; } TrashPolicyOzone trash = new TrashPolicyOzone(fs, conf, om); - Runnable task = ()->{ + Runnable task = () -> { try { om.getMetrics().incNumTrashRootsProcessed(); trash.deleteCheckpoint(trashRoot.getPath(), false); @@ -241,7 +241,7 @@ public void run() { } try { fs.close(); - } catch(IOException e) { + } catch (IOException e) { LOG.warn("Trash cannot close FileSystem: ", e); } finally { executor.shutdown(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java index a7329c81ca3..4db738aa2fb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/fs/OzoneManagerFS.java @@ -21,7 +21,6 @@ import org.apache.hadoop.ozone.om.IOzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; import java.io.IOException; @@ -53,11 +52,6 @@ public interface OzoneManagerFS extends IOzoneAcl { OzoneFileStatus getFileStatus(OmKeyArgs args, String clientAddress) throws IOException; - void createDirectory(OmKeyArgs args) throws IOException; - - OpenKeySession createFile(OmKeyArgs args, boolean isOverWrite, - boolean isRecursive) throws IOException; - /** * Look up a file. Return the info of the file to client side. 
* diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java index e3ab5bd91cf..eaa38ef7132 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerDoubleBuffer.java @@ -243,7 +243,7 @@ private void flushTransactions() { setReadyBuffer(); List flushedEpochs = null; - try(BatchOperation batchOperation = omMetadataManager.getStore() + try (BatchOperation batchOperation = omMetadataManager.getStore() .initBatchOperation()) { AtomicReference lastTraceId = new AtomicReference<>(); @@ -376,7 +376,7 @@ private void addCleanupEntry(DoubleBufferEntry entry, Map applyTransaction(TransactionContext trx) { CompletableFuture future = CompletableFuture.supplyAsync( () -> runCommand(request, trxLogIndex), executorService); future.thenApply(omResponse -> { - if(!omResponse.getSuccess()) { + if (!omResponse.getSuccess()) { // When INTERNAL_ERROR or METADATA_ERROR it is considered as // critical error and terminate the OM. Considering INTERNAL_ERROR // also for now because INTERNAL_ERROR is thrown for any error @@ -516,8 +516,8 @@ private OMResponse runCommand(OMRequest request, long trxLogIndex) { */ public void updateLastAppliedIndex(List flushedEpochs) { Preconditions.checkArgument(flushedEpochs.size() > 0); - computeAndUpdateLastAppliedIndex(flushedEpochs.get(flushedEpochs.size() -1), - -1L, flushedEpochs, true); + computeAndUpdateLastAppliedIndex( + flushedEpochs.get(flushedEpochs.size() - 1), -1L, flushedEpochs, true); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java index 0480859c39e..47c2eb83ead 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java @@ -39,6 +39,7 @@ import org.apache.hadoop.ozone.om.request.OMKeyRequestFactory; import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest; import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest; +import org.apache.hadoop.ozone.om.request.bucket.OMBucketSetOwnerRequest; import org.apache.hadoop.ozone.om.request.bucket.OMBucketSetPropertyRequest; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketAddAclRequest; @@ -141,7 +142,13 @@ public static OMClientRequest createClientRequest(OMRequest omRequest, case DeleteBucket: return new OMBucketDeleteRequest(omRequest); case SetBucketProperty: - return new OMBucketSetPropertyRequest(omRequest); + boolean hasBucketOwner = omRequest.getSetBucketPropertyRequest() + .getBucketArgs().hasOwnerName(); + if (hasBucketOwner) { + return new OMBucketSetOwnerRequest(omRequest); + } else { + return new OMBucketSetPropertyRequest(omRequest); + } case AddAcl: case RemoveAcl: case SetAcl: diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index c5668a3125d..9a3eedb6829 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -160,7 +160,7 @@ public OzoneManagerProtocolProtos.UserInfo getUserInfo() { public OzoneManagerProtocolProtos.UserInfo getUserIfNotExists( OzoneManager ozoneManager) { OzoneManagerProtocolProtos.UserInfo userInfo = getUserInfo(); - if (!userInfo.hasRemoteAddress() || !userInfo.hasUserName()){ + if (!userInfo.hasRemoteAddress() || !userInfo.hasUserName()) { OzoneManagerProtocolProtos.UserInfo.Builder newuserInfo = OzoneManagerProtocolProtos.UserInfo.newBuilder(); UserGroupInformation user; @@ -169,7 +169,7 @@ public OzoneManagerProtocolProtos.UserInfo getUserIfNotExists( user = UserGroupInformation.getCurrentUser(); remoteAddress = ozoneManager.getOmRpcServerAddr() .getAddress(); - } catch (Exception e){ + } catch (Exception e) { LOG.debug("Couldn't get om Rpc server address", e); return getUserInfo(); } @@ -521,7 +521,7 @@ private static String isValidKeyPath(String path) throws OMException { if (path.length() == 0) { throw new OMException("Invalid KeyPath, empty keyName" + path, INVALID_KEY_NAME); - } else if(path.startsWith("/")) { + } else if (path.startsWith("/")) { isValid = false; } else { // Check for ".." "." ":" "/" diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index b3acaaaddb3..b90e1047274 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -336,13 +336,13 @@ public boolean checkQuotaBytesValid(OMMetadataManager metadataManager, List bucketList = metadataManager.listBuckets( omVolumeArgs.getVolume(), null, null, Integer.MAX_VALUE); - for(OmBucketInfo bucketInfo : bucketList) { + for (OmBucketInfo bucketInfo : bucketList) { long nextQuotaInBytes = bucketInfo.getQuotaInBytes(); - if(nextQuotaInBytes > OzoneConsts.QUOTA_RESET) { + if (nextQuotaInBytes > OzoneConsts.QUOTA_RESET) { totalBucketQuota += nextQuotaInBytes; } } - if(volumeQuotaInBytes < totalBucketQuota + if (volumeQuotaInBytes < totalBucketQuota && volumeQuotaInBytes != OzoneConsts.QUOTA_RESET) { throw new IllegalArgumentException("Total buckets quota in this volume " + "should not be greater than volume quota : the total space quota is" + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java new file mode 100644 index 00000000000..6aad48b4c71 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java @@ -0,0 +1,199 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.request.bucket; + +import com.google.common.base.Optional; +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketArgs; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.OMClientRequest; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.bucket.OMBucketSetOwnerResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handle set owner request for bucket. 
+ */ +public class OMBucketSetOwnerRequest extends OMClientRequest { + private static final Logger LOG = + LoggerFactory.getLogger(OMBucketSetOwnerRequest.class); + + public OMBucketSetOwnerRequest(OMRequest omRequest) { + super(omRequest); + } + + @Override + public OMRequest preExecute(OzoneManager ozoneManager) + throws IOException { + long modificationTime = Time.now(); + OzoneManagerProtocolProtos.SetBucketPropertyRequest.Builder + setBucketPropertyRequestBuilder = getOmRequest() + .getSetBucketPropertyRequest().toBuilder() + .setModificationTime(modificationTime); + + return getOmRequest().toBuilder() + .setSetBucketPropertyRequest(setBucketPropertyRequestBuilder) + .setUserInfo(getUserInfo()) + .build(); + } + + @Override + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long transactionLogIndex, + OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper) { + SetBucketPropertyRequest setBucketPropertyRequest = + getOmRequest().getSetBucketPropertyRequest(); + Preconditions.checkNotNull(setBucketPropertyRequest); + + OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( + getOmRequest()); + + if (!setBucketPropertyRequest.getBucketArgs().hasOwnerName()) { + omResponse.setStatus(OzoneManagerProtocolProtos.Status.INVALID_REQUEST) + .setSuccess(false); + return new OMBucketSetOwnerResponse(omResponse.build()); + } + + OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + OMMetrics omMetrics = ozoneManager.getMetrics(); + omMetrics.incNumBucketUpdates(); + + BucketArgs bucketArgs = setBucketPropertyRequest.getBucketArgs(); + OmBucketArgs omBucketArgs = OmBucketArgs.getFromProtobuf(bucketArgs); + + String volumeName = bucketArgs.getVolumeName(); + String bucketName = bucketArgs.getBucketName(); + String newOwner = bucketArgs.getOwnerName(); + String oldOwner = null; + + AuditLogger auditLogger = ozoneManager.getAuditLogger(); + OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); + IOException exception = null; + boolean acquiredBucketLock = false, success = true; + OMClientResponse omClientResponse = null; + try { + // check Acl + if (ozoneManager.getAclsEnabled()) { + checkAcls(ozoneManager, OzoneObj.ResourceType.BUCKET, + OzoneObj.StoreType.OZONE, IAccessAuthorizer.ACLType.WRITE_ACL, + volumeName, bucketName, null); + } + + // acquire lock. 
+ acquiredBucketLock = omMetadataManager.getLock().acquireWriteLock( + BUCKET_LOCK, volumeName, bucketName); + + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo omBucketInfo = + omMetadataManager.getBucketTable().get(bucketKey); + // Check if bucket exists + if (omBucketInfo == null) { + LOG.debug("Bucket: {} not found ", bucketName); + throw new OMException("Bucket doesn't exist", + OMException.ResultCodes.BUCKET_NOT_FOUND); + } + + oldOwner = omBucketInfo.getOwner(); + + if (oldOwner.equals(newOwner)) { + LOG.warn("Bucket '{}/{}' owner is already user '{}'.", + volumeName, bucketName, oldOwner); + omResponse.setStatus(OzoneManagerProtocolProtos.Status.OK) + .setMessage("Bucket '" + volumeName + "/" + bucketName + + "' owner is already '" + newOwner + "'.") + .setSuccess(false); + omResponse.setSetBucketPropertyResponse( + SetBucketPropertyResponse.newBuilder().setResponse(false).build()); + omClientResponse = new OMBucketSetOwnerResponse(omResponse.build()); + return omClientResponse; + } + + omBucketInfo.setOwner(newOwner); + LOG.debug("Updating bucket owner to {} for bucket: {} in volume: {}", + newOwner, bucketName, volumeName); + + omBucketInfo.setModificationTime( + setBucketPropertyRequest.getModificationTime()); + // Set the updateID to current transaction log index + omBucketInfo.setUpdateID(transactionLogIndex, + ozoneManager.isRatisEnabled()); + + // Update table cache. + omMetadataManager.getBucketTable().addCacheEntry( + new CacheKey<>(bucketKey), + new CacheValue<>(Optional.of(omBucketInfo), transactionLogIndex)); + + omResponse.setSetBucketPropertyResponse( + SetBucketPropertyResponse.newBuilder().setResponse(true).build()); + omClientResponse = new OMBucketSetOwnerResponse( + omResponse.build(), omBucketInfo); + } catch (IOException ex) { + success = false; + exception = ex; + omClientResponse = new OMBucketSetOwnerResponse( + createErrorOMResponse(omResponse, exception)); + } finally { + addResponseToDoubleBuffer(transactionLogIndex, omClientResponse, + ozoneManagerDoubleBufferHelper); + if (acquiredBucketLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName); + } + } + + // Performing audit logging outside of the lock. + auditLog(auditLogger, buildAuditMessage(OMAction.SET_OWNER, + omBucketArgs.toAuditMap(), exception, userInfo)); + + // return response. 
+ if (success) { + LOG.debug("Successfully changed Owner of Bucket {}/{} from {} -> {}", + volumeName, bucketName, oldOwner, newOwner); + return omClientResponse; + } else { + LOG.error("Setting Owner failed for bucket:{} in volume:{}", + bucketName, volumeName, exception); + omMetrics.incNumBucketUpdateFails(); + return omClientResponse; + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 44c37562cf3..17c4e3925d3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -267,15 +267,15 @@ public boolean checkQuotaBytesValid(OMMetadataManager metadataManager, } List bucketList = metadataManager.listBuckets( omVolumeArgs.getVolume(), null, null, Integer.MAX_VALUE); - for(OmBucketInfo bucketInfo : bucketList) { + for (OmBucketInfo bucketInfo : bucketList) { long nextQuotaInBytes = bucketInfo.getQuotaInBytes(); - if(nextQuotaInBytes > OzoneConsts.QUOTA_RESET && + if (nextQuotaInBytes > OzoneConsts.QUOTA_RESET && !omBucketArgs.getBucketName().equals(bucketInfo.getBucketName())) { totalBucketQuota += nextQuotaInBytes; } } - if(volumeQuotaInBytes < totalBucketQuota && + if (volumeQuotaInBytes < totalBucketQuota && volumeQuotaInBytes != OzoneConsts.QUOTA_RESET) { throw new OMException("Total buckets quota in this volume " + "should not be greater than volume quota : the total space quota is" + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java index d4ba0175057..5d01c4ff6ac 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java @@ -120,7 +120,7 @@ OMClientResponse onSuccess(OMResponse.Builder omResponse, @Override void onComplete(boolean operationResult, IOException exception, OMMetrics omMetrics, AuditLogger auditLogger, - Map auditMap){ + Map auditMap) { auditLog(auditLogger, buildAuditMessage(OMAction.SET_ACL, auditMap, exception, getOmRequest().getUserInfo())); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index e197cca8301..48d4274284e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -193,7 +193,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmKeyInfo dirKeyInfo = null; if (omDirectoryResult == FILE_EXISTS || omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) { - throw new OMException("Unable to create directory: " +keyName + throw new OMException("Unable to create directory: " + keyName + " in volume/bucket: " + volumeName + "/" + bucketName, FILE_ALREADY_EXISTS); } else if (omDirectoryResult == 
DIRECTORY_EXISTS_IN_GIVENPATH || diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 4e19b998266..24994d7a86f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -97,7 +97,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if(checkKeyNameEnabled){ + if (checkKeyNameEnabled) { OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java index 9f05fee7e4f..f46b2dd29ff 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java @@ -199,7 +199,7 @@ public static OMPathInfoWithFSO verifyDirectoryKeysInPath( // Add all the sub-dirs to the missing list except the leaf element. // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt. // Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list. - if(elements.hasNext()){ + if (elements.hasNext()) { // skips leaf node. missing.add(fileName); } @@ -273,7 +273,7 @@ public static OMPathInfoWithFSO verifyDirectoryKeysInPath( * Includes the list of missing intermediate directories and * the directory search result code. */ - public static class OMPathInfoWithFSO extends OMPathInfo{ + public static class OMPathInfoWithFSO extends OMPathInfo { private String leafNodeName; private long lastKnownParentId; private long leafNodeObjectId; @@ -759,7 +759,7 @@ public static String getAbsolutePath(String prefixName, String fileName) { * @param keyInfo omKeyInfo * @return omDirectoryInfo object */ - public static OmDirectoryInfo getDirectoryInfo(OmKeyInfo keyInfo){ + public static OmDirectoryInfo getDirectoryInfo(OmKeyInfo keyInfo) { OmDirectoryInfo.Builder builder = new OmDirectoryInfo.Builder(); builder.setParentObjectID(keyInfo.getParentObjectID()); builder.setAcls(keyInfo.getAcls()); @@ -838,7 +838,7 @@ public static long getToKeyNameParentId(String volumeName, "Failed to rename %s to %s, %s doesn't exist", fromKeyName, toKeyName, toKeyParentDir), OMException.ResultCodes.KEY_RENAME_ERROR); - } else if (toKeyParentDirStatus.isFile()){ + } else if (toKeyParentDirStatus.isFile()) { throw new OMException(String.format( "Failed to rename %s to %s, %s is a file", fromKeyName, toKeyName, toKeyParentDir), OMException.ResultCodes.KEY_RENAME_ERROR); @@ -975,7 +975,7 @@ public static long getParentID(long bucketId, Iterator pathComponents, long lastKnownParentId = bucketId; // If no sub-dirs then bucketID is the root/parent. 
- if(!pathComponents.hasNext()){ + if (!pathComponents.hasNext()) { return bucketId; } if (StringUtils.isBlank(errMsg)) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 9a2ac61216a..9bdb51f7fd1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -88,7 +88,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if(checkKeyNameEnabled){ + if (checkKeyNameEnabled) { OmUtils.validateKeyName(StringUtils.removeEnd(keyArgs.getKeyName(), OzoneConsts.FS_FILE_COPYING_TEMP_SUFFIX)); } @@ -265,7 +265,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); - if(bucketLockAcquired) { + if (bucketLockAcquired) { omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, bucketName); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 50d9e4cdc38..ffa3ebf4631 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -98,7 +98,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if(checkKeyNameEnabled){ + if (checkKeyNameEnabled) { OmUtils.validateKeyName(keyArgs.getKeyName()); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index 99ca308ac4e..0c96756db45 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -88,7 +88,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { final boolean checkKeyNameEnabled = ozoneManager.getConfiguration() .getBoolean(OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_KEY, OMConfigKeys.OZONE_OM_KEYNAME_CHARACTER_CHECK_ENABLED_DEFAULT); - if(checkKeyNameEnabled){ + if (checkKeyNameEnabled) { OmUtils.validateKeyName(renameKeyRequest.getToKeyName()); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java index 2235bafb8da..f9c67d6a94b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java 
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java @@ -149,7 +149,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, volumeName, bucketName, toKeyName, 0); // Check if toKey exists. - if(toKeyFileStatus != null) { + if (toKeyFileStatus != null) { // Destination exists and following are different cases: OmKeyInfo toKeyValue = toKeyFileStatus.getKeyInfo(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index b4528b889c1..8fe9011ca0f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -259,22 +259,22 @@ protected List< OzoneAcl > getAclsForKey(KeyArgs keyArgs, OmBucketInfo bucketInfo, PrefixManager prefixManager) { List acls = new ArrayList<>(); - if(keyArgs.getAclsList() != null) { + if (keyArgs.getAclsList() != null) { acls.addAll(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList())); } // Inherit DEFAULT acls from prefix. - if(prefixManager != null) { + if (prefixManager != null) { List< OmPrefixInfo > prefixList = prefixManager.getLongestPrefixPath( OZONE_URI_DELIMITER + keyArgs.getVolumeName() + OZONE_URI_DELIMITER + keyArgs.getBucketName() + OZONE_URI_DELIMITER + keyArgs.getKeyName()); - if(prefixList.size() > 0) { + if (prefixList.size() > 0) { // Add all acls from direct parent to key. OmPrefixInfo prefixInfo = prefixList.get(prefixList.size() - 1); - if(prefixInfo != null) { + if (prefixInfo != null) { if (OzoneAclUtil.inheritDefaultAcls(acls, prefixInfo.getAcls())) { return acls; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java similarity index 98% rename from hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteRequest.java rename to hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java index d945ae56fa8..042b710b080 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.om.response.key; +package org.apache.hadoop.ozone.om.request.key; import com.google.common.base.Optional; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; @@ -25,9 +25,9 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.key.OMOpenKeysDeleteResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OpenKeyBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OpenKey; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java index 4a45ef97d82..0fe89250f1b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java @@ -112,7 +112,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, if (getOmRequest().getAddAclRequest().hasObj() && operationResult) { modificationTime = getOmRequest().getAddAclRequest() .getModificationTime(); - } else if (getOmRequest().getSetAclRequest().hasObj() && operationResult){ + } else if (getOmRequest().getSetAclRequest().hasObj() + && operationResult) { modificationTime = getOmRequest().getSetAclRequest() .getModificationTime(); } else if (getOmRequest().getRemoveAclRequest().hasObj() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 21d10c4f1e6..b7cf6561360 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -168,7 +168,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, int keyFactor = omKeyInfo.getReplicationConfig().getRequiredNodes(); Iterator iter = multipartKeyInfo.getPartKeyInfoMap().entrySet().iterator(); - while(iter.hasNext()) { + while (iter.hasNext()) { Map.Entry entry = (Map.Entry)iter.next(); PartKeyInfo iterPartKeyInfo = (PartKeyInfo)entry.getValue(); quotaReleased += diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java index 26d962703a7..d8848fc116d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java @@ -197,13 +197,13 @@ public boolean checkQuotaBytesValid(OMMetadataManager metadataManager, List bucketList = metadataManager.listBuckets( volumeName, 
null, null, Integer.MAX_VALUE); - for(OmBucketInfo bucketInfo : bucketList) { + for (OmBucketInfo bucketInfo : bucketList) { long nextQuotaInBytes = bucketInfo.getQuotaInBytes(); - if(nextQuotaInBytes > OzoneConsts.QUOTA_RESET) { + if (nextQuotaInBytes > OzoneConsts.QUOTA_RESET) { totalBucketQuota += nextQuotaInBytes; } } - if(volumeQuotaInBytes < totalBucketQuota && + if (volumeQuotaInBytes < totalBucketQuota && volumeQuotaInBytes != OzoneConsts.QUOTA_RESET) { throw new OMException("Total buckets quota in this volume " + "should not be greater than volume quota : the total space quota is" + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java index b91aef8d841..ce1a4d03d9f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java @@ -102,7 +102,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, if (getOmRequest().getAddAclRequest().hasObj()) { modificationTime = getOmRequest().getAddAclRequest() .getModificationTime(); - } else if (getOmRequest().getSetAclRequest().hasObj()){ + } else if (getOmRequest().getSetAclRequest().hasObj()) { modificationTime = getOmRequest().getSetAclRequest() .getModificationTime(); } else if (getOmRequest().getRemoveAclRequest().hasObj()) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java index 8df7792e3b9..e776df08594 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java @@ -110,7 +110,7 @@ OMResponse.Builder onInit() { @Override OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmVolumeArgs omVolumeArgs, boolean aclApplied){ + OmVolumeArgs omVolumeArgs, boolean aclApplied) { omResponse.setAddAclResponse(OzoneManagerProtocolProtos.AddAclResponse .newBuilder().setResponse(aclApplied).build()); return new OMVolumeAclOpResponse(omResponse.build(), omVolumeArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java index 4ab55f3788c..ff2792d44fe 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java @@ -110,7 +110,7 @@ OMResponse.Builder onInit() { @Override OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmVolumeArgs omVolumeArgs, boolean aclApplied){ + OmVolumeArgs omVolumeArgs, boolean aclApplied) { omResponse.setRemoveAclResponse(OzoneManagerProtocolProtos.RemoveAclResponse .newBuilder().setResponse(aclApplied).build()); return new OMVolumeAclOpResponse(omResponse.build(), omVolumeArgs); diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java index 710250920b5..95d98f4ddda 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java @@ -107,7 +107,7 @@ OMResponse.Builder onInit() { @Override OMClientResponse onSuccess(OMResponse.Builder omResponse, - OmVolumeArgs omVolumeArgs, boolean aclApplied){ + OmVolumeArgs omVolumeArgs, boolean aclApplied) { omResponse.setSetAclResponse(OzoneManagerProtocolProtos.SetAclResponse .newBuilder().setResponse(aclApplied).build()); return new OMVolumeAclOpResponse(omResponse.build(), omVolumeArgs); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetOwnerResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetOwnerResponse.java new file mode 100644 index 00000000000..268787f33a9 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/bucket/OMBucketSetOwnerResponse.java @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.response.bucket; + +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.response.CleanupTableInfo; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos + .OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; + +import javax.annotation.Nonnull; +import java.io.IOException; + +import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; + +/** + * Response for set owner request. + */ +@CleanupTableInfo(cleanupTables = {BUCKET_TABLE}) +public class OMBucketSetOwnerResponse extends OMClientResponse { + + private OmBucketInfo omBucketInfo; + + public OMBucketSetOwnerResponse(@Nonnull OMResponse omResponse, + @Nonnull OmBucketInfo omBucketInfo) { + super(omResponse); + this.omBucketInfo = omBucketInfo; + } + + /** + * For when the request is not successful. + * For a successful request, the other constructor should be used. 
+ */ + public OMBucketSetOwnerResponse(@Nonnull OMResponse omResponse) { + super(omResponse); + if (omResponse.getSuccess()) { + checkStatusNotOK(); + } + } + + @Override + public void checkAndUpdateDB(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + // When newOwner is the same as oldOwner, status is OK but success is false. + // We don't want to add it to DB batch in this case. + if (getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK && + getOMResponse().getSuccess()) { + addToDBBatch(omMetadataManager, batchOperation); + } + } + + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + + String dbBucketKey = + omMetadataManager.getBucketKey(omBucketInfo.getVolumeName(), + omBucketInfo.getBucketName()); + omMetadataManager.getBucketTable().putWithBatch(batchOperation, + dbBucketKey, omBucketInfo); + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java index 5d34f6a32ca..10dda299793 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java @@ -404,7 +404,7 @@ public void removeToken(OzoneTokenIdentifier ozoneTokenIdentifier) { @Override public byte[] retrievePassword(OzoneTokenIdentifier identifier) throws InvalidToken { - if(identifier.getTokenType().equals(S3AUTHINFO)) { + if (identifier.getTokenType().equals(S3AUTHINFO)) { return validateS3AuthInfo(identifier); } return validateToken(identifier).getPassword(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java index 756e821bf81..fb348fd4f2b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneSecretStore.java @@ -102,9 +102,9 @@ public int loadTokens(OzoneManagerSecretState state) throws IOException { int loadedToken = 0; try (TableIterator> iterator = - omMetadataManager.getDelegationTokenTable().iterator()){ + omMetadataManager.getDelegationTokenTable().iterator()) { iterator.seekToFirst(); - while(iterator.hasNext()) { + while (iterator.hasNext()) { KeyValue kv = iterator.next(); state.tokenState.put(kv.getKey(), kv.getValue()); loadedToken++; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java new file mode 100644 index 00000000000..a8cbdc6425e --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
<p>
+ * http://www.apache.org/licenses/LICENSE-2.0 + * <p>
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.mockito.Mockito; + +import java.io.IOException; + +/** + * Test utility for creating a dummy OM, the associated + * managers, and writeClient. + */ +public final class OmTestManagers { + + private OzoneManagerProtocol writeClient; + private OzoneManager om; + private KeyManager keyManager; + private OMMetadataManager metadataManager; + private VolumeManager volumeManager; + private BucketManager bucketManager; + private PrefixManager prefixManager; + + public OzoneManager getOzoneManager() { + return om; + } + public OzoneManagerProtocol getWriteClient() { + return writeClient; + } + public BucketManager getBucketManager() { + return bucketManager; + } + public VolumeManager getVolumeManager() { + return volumeManager; + } + public PrefixManager getPrefixManager() { + return prefixManager; + } + public OMMetadataManager getMetadataManager() { + return metadataManager; + } + public KeyManager getKeyManager() { + return keyManager; + } + + public OmTestManagers(OzoneConfiguration conf) + throws AuthenticationException, IOException { + this(conf, null, null); + } + + public OmTestManagers(OzoneConfiguration conf, + ScmBlockLocationProtocol blockClient, + StorageContainerLocationProtocol containerClient) + throws AuthenticationException, IOException { + if (containerClient == null) { + containerClient = + Mockito.mock(StorageContainerLocationProtocol.class); + } + if (blockClient == null) { + blockClient = + new ScmBlockLocationTestingClient(null, null, 0); + } + + conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); + DefaultMetricsSystem.setMiniClusterMode(true); + OMStorage omStorage = new OMStorage(conf); + omStorage.setClusterId("omtest"); + omStorage.setOmId("omtest"); + omStorage.initialize(); + OzoneManager.setTestSecureOmFlag(true); + om = OzoneManager.createOm(conf, + OzoneManager.StartupOption.REGUALR); + + keyManager = (KeyManagerImpl) HddsWhiteboxTestUtils + .getInternalState(om, "keyManager"); + ScmClient scmClient = new ScmClient(blockClient, containerClient); + HddsWhiteboxTestUtils.setInternalState(om, + "scmClient", scmClient); + HddsWhiteboxTestUtils.setInternalState(keyManager, + "scmClient", scmClient); + HddsWhiteboxTestUtils.setInternalState(keyManager, + "secretManager", Mockito.mock(OzoneBlockTokenSecretManager.class)); + + om.start(); + writeClient = OzoneClientFactory.getRpcClient(conf) + .getObjectStore().getClientProxy().getOzoneManagerClient(); + metadataManager = (OmMetadataManagerImpl) HddsWhiteboxTestUtils + .getInternalState(om, 
"metadataManager"); + volumeManager = (VolumeManagerImpl)HddsWhiteboxTestUtils + .getInternalState(om, "volumeManager"); + bucketManager = (BucketManagerImpl)HddsWhiteboxTestUtils + .getInternalState(om, "bucketManager"); + prefixManager = (PrefixManagerImpl)HddsWhiteboxTestUtils + .getInternalState(om, "prefixManager"); + + } + +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java index ae263b6c3bd..542c7749221 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java @@ -30,7 +30,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; import org.apache.hadoop.ozone.om.helpers.*; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Ignore; import org.junit.Rule; @@ -223,7 +223,7 @@ public void testGetBucketInfo() throws Exception { .setAdminName("bilbo") .setOwnerName("bilbo") .build(); - TestOMRequestUtils.addVolumeToOM(metaMgr, args); + OMRequestTestUtils.addVolumeToOM(metaMgr, args); // Create bucket createBucket(metaMgr, bucketInfo); // Check exception thrown when bucket does not exist @@ -245,7 +245,7 @@ public void testGetBucketInfo() throws Exception { private void createBucket(OMMetadataManager metadataManager, OmBucketInfo bucketInfo) throws IOException { - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); } @Test diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java index 7d5fb60ebd9..9f17464fbc2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java @@ -23,20 +23,25 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; -import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.hdds.utils.db.DBConfigFromFile; @@ -44,6 +49,7 @@ import static 
org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import org.junit.After; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; @@ -61,6 +67,10 @@ public class TestKeyDeletingService { @Rule public TemporaryFolder folder = new TemporaryFolder(); + private OzoneManagerProtocol writeClient; + private OzoneManager om; + private static final Logger LOG = + LoggerFactory.getLogger(TestKeyDeletingService.class); private OzoneConfiguration createConfAndInitValues() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(); @@ -79,6 +89,11 @@ private OzoneConfiguration createConfAndInitValues() throws IOException { return conf; } + @After + public void cleanup() throws Exception { + om.stop(); + } + /** * In this test, we create a bunch of keys and delete them. Then we start the * KeyDeletingService and pass a SCMClient which does not fail. We make sure @@ -90,14 +105,15 @@ private OzoneConfiguration createConfAndInitValues() throws IOException { @Test(timeout = 30000) public void checkIfDeleteServiceisDeletingKeys() - throws IOException, TimeoutException, InterruptedException { + throws IOException, TimeoutException, InterruptedException, + AuthenticationException { OzoneConfiguration conf = createConfAndInitValues(); - OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf); - KeyManager keyManager = - new KeyManagerImpl( - new ScmBlockLocationTestingClient(null, null, 0), - metaMgr, conf, UUID.randomUUID().toString(), null); - keyManager.start(conf); + OmTestManagers omTestManagers + = new OmTestManagers(conf); + KeyManager keyManager = omTestManagers.getKeyManager(); + writeClient = omTestManagers.getWriteClient(); + om = omTestManagers.getOzoneManager(); + final int keyCount = 100; createAndDeleteKeys(keyManager, keyCount, 1); KeyDeletingService keyDeletingService = @@ -110,24 +126,40 @@ public void checkIfDeleteServiceisDeletingKeys() keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size(), 0); } - @Test(timeout = 30000) + @Test(timeout = 40000) public void checkIfDeleteServiceWithFailingSCM() - throws IOException, TimeoutException, InterruptedException { + throws IOException, TimeoutException, InterruptedException, + AuthenticationException { OzoneConfiguration conf = createConfAndInitValues(); - OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf); - //failCallsFrequency = 1 , means all calls fail. - KeyManager keyManager = - new KeyManagerImpl( - new ScmBlockLocationTestingClient(null, null, 1), - metaMgr, conf, UUID.randomUUID().toString(), null); - keyManager.start(conf); + ScmBlockLocationProtocol blockClient = + //failCallsFrequency = 1 , means all calls fail. 
+ new ScmBlockLocationTestingClient(null, null, 1); + OmTestManagers omTestManagers + = new OmTestManagers(conf, blockClient, null); + KeyManager keyManager = omTestManagers.getKeyManager(); + writeClient = omTestManagers.getWriteClient(); + om = omTestManagers.getOzoneManager(); + final int keyCount = 100; createAndDeleteKeys(keyManager, keyCount, 1); KeyDeletingService keyDeletingService = (KeyDeletingService) keyManager.getDeletingService(); - keyManager.start(conf); - Assert.assertEquals( - keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size(), keyCount); + GenericTestUtils.waitFor( + () -> { + try { + int numPendingDeletionKeys = + keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size(); + if (numPendingDeletionKeys != keyCount) { + LOG.info("Expected {} keys to be pending deletion, but got {}", + keyCount, numPendingDeletionKeys); + return false; + } + return true; + } catch (IOException e) { + LOG.error("Error while getting pending deletion keys.", e); + return false; + } + }, 100, 2000); // Make sure that we have run the background thread 5 times more GenericTestUtils.waitFor( () -> keyDeletingService.getRunCount().get() >= 5, @@ -140,20 +172,22 @@ public void checkIfDeleteServiceWithFailingSCM() @Test(timeout = 30000) public void checkDeletionForEmptyKey() - throws IOException, TimeoutException, InterruptedException { + throws IOException, TimeoutException, InterruptedException, + AuthenticationException { OzoneConfiguration conf = createConfAndInitValues(); - OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(conf); - //failCallsFrequency = 1 , means all calls fail. - KeyManager keyManager = - new KeyManagerImpl( - new ScmBlockLocationTestingClient(null, null, 1), - metaMgr, conf, UUID.randomUUID().toString(), null); - keyManager.start(conf); + ScmBlockLocationProtocol blockClient = + //failCallsFrequency = 1 , means all calls fail. + new ScmBlockLocationTestingClient(null, null, 1); + OmTestManagers omTestManagers + = new OmTestManagers(conf, blockClient, null); + KeyManager keyManager = omTestManagers.getKeyManager(); + writeClient = omTestManagers.getWriteClient(); + om = omTestManagers.getOzoneManager(); + final int keyCount = 100; createAndDeleteKeys(keyManager, keyCount, 0); KeyDeletingService keyDeletingService = (KeyDeletingService) keyManager.getDeletingService(); - keyManager.start(conf); // Since empty keys are directly deleted from db there should be no // pending deletion keys. Also deletedKeyCount should be zero. @@ -182,14 +216,14 @@ private void createAndDeleteKeys(KeyManager keyManager, int keyCount, // cheat here, just create a volume and bucket entry so that we can // create the keys, we put the same data for key and value since the // system does not decode the object - TestOMRequestUtils.addVolumeToOM(keyManager.getMetadataManager(), + OMRequestTestUtils.addVolumeToOM(keyManager.getMetadataManager(), OmVolumeArgs.newBuilder() .setOwnerName("o") .setAdminName("a") .setVolume(volumeName) .build()); - TestOMRequestUtils.addBucketToOM(keyManager.getMetadataManager(), + OMRequestTestUtils.addBucketToOM(keyManager.getMetadataManager(), OmBucketInfo.newBuilder().setVolumeName(volumeName) .setBucketName(bucketName) .build()); @@ -205,13 +239,13 @@ private void createAndDeleteKeys(KeyManager keyManager, int keyCount, .setLocationInfoList(new ArrayList<>()) .build(); //Open, Commit and Delete the Keys in the Key Manager. 
- OpenKeySession session = keyManager.openKey(arg); + OpenKeySession session = writeClient.openKey(arg); for (int i = 0; i < numBlocks; i++) { arg.addLocationInfo( - keyManager.allocateBlock(arg, session.getId(), new ExcludeList())); + writeClient.allocateBlock(arg, session.getId(), new ExcludeList())); } - keyManager.commitKey(arg, session.getId()); - keyManager.deleteKey(arg); + writeClient.commitKey(arg, session.getId()); + writeClient.deleteKey(arg); } } } \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index b3100795512..cfeab4b31f3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -64,9 +64,9 @@ import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadListParts; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.security.OzoneBlockTokenSecretManager; import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.Time; @@ -89,7 +89,7 @@ public class TestKeyManagerUnit { private OzoneConfiguration configuration; - private OmMetadataManagerImpl metadataManager; + private OMMetadataManager metadataManager; private StorageContainerLocationProtocol containerClient; private KeyManagerImpl keyManager; @@ -97,25 +97,30 @@ public class TestKeyManagerUnit { private File testDir; private ScmBlockLocationProtocol blockClient; + private OzoneManagerProtocol writeClient; + private OzoneManager om; + @Before - public void setup() throws IOException { + public void setup() throws Exception { configuration = new OzoneConfiguration(); testDir = GenericTestUtils.getRandomizedTestDir(); configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.toString()); - metadataManager = new OmMetadataManagerImpl(configuration); containerClient = Mockito.mock(StorageContainerLocationProtocol.class); blockClient = Mockito.mock(ScmBlockLocationProtocol.class); - keyManager = new KeyManagerImpl( - blockClient, containerClient, metadataManager, configuration, - "omtest", Mockito.mock(OzoneBlockTokenSecretManager.class)); + OmTestManagers omTestManagers + = new OmTestManagers(configuration, blockClient, containerClient); + om = omTestManagers.getOzoneManager(); + metadataManager = omTestManagers.getMetadataManager(); + keyManager = (KeyManagerImpl)omTestManagers.getKeyManager(); + writeClient = omTestManagers.getWriteClient(); startDate = Instant.now(); } @After public void cleanup() throws Exception { - metadataManager.stop(); + om.stop(); FileUtils.deleteDirectory(testDir); } @@ -125,7 +130,7 @@ public void listMultipartUploadPartsWithZeroUpload() throws IOException { createBucket(metadataManager, "vol1", "bucket1"); OmMultipartInfo omMultipartInfo = - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1"); + initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1"); //WHEN OmMultipartUploadListParts omMultipartUploadListParts = keyManager @@ -143,9 +148,9 @@ public void listMultipartUploads() throws 
IOException { createBucket(metadataManager, "vol1", "bucket1"); createBucket(metadataManager, "vol1", "bucket2"); - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1"); - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key2"); - initMultipartUpload(keyManager, "vol1", "bucket2", "dir/key1"); + initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1"); + initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key2"); + initMultipartUpload(writeClient, "vol1", "bucket2", "dir/key1"); //WHEN OmMultipartUploadList omMultipartUploadList = @@ -178,11 +183,11 @@ public void listMultipartUploadsWithFewEntriesInCache() throws IOException { // Add few to cache and few to DB. addinitMultipartUploadToCache(volume, bucket, "dir/key1"); - initMultipartUpload(keyManager, volume, bucket, "dir/key2"); + initMultipartUpload(writeClient, volume, bucket, "dir/key2"); addinitMultipartUploadToCache(volume, bucket, "dir/key3"); - initMultipartUpload(keyManager, volume, bucket, "dir/key4"); + initMultipartUpload(writeClient, volume, bucket, "dir/key4"); //WHEN OmMultipartUploadList omMultipartUploadList = @@ -201,12 +206,12 @@ public void listMultipartUploadsWithFewEntriesInCache() throws IOException { // Same way add few to cache and few to DB. addinitMultipartUploadToCache(volume, bucket, "dir/ozonekey1"); - initMultipartUpload(keyManager, volume, bucket, "dir/ozonekey2"); + initMultipartUpload(writeClient, volume, bucket, "dir/ozonekey2"); - OmMultipartInfo omMultipartInfo3 =addinitMultipartUploadToCache(volume, + OmMultipartInfo omMultipartInfo3 = addinitMultipartUploadToCache(volume, bucket, "dir/ozonekey3"); - OmMultipartInfo omMultipartInfo4 = initMultipartUpload(keyManager, + OmMultipartInfo omMultipartInfo4 = initMultipartUpload(writeClient, volume, bucket, "dir/ozonekey4"); omMultipartUploadList = @@ -258,13 +263,13 @@ public void listMultipartUploadsWithPrefix() throws IOException { createBucket(metadataManager, "vol1", "bucket1"); createBucket(metadataManager, "vol1", "bucket2"); - initMultipartUpload(keyManager, "vol1", "bucket1", "dip/key1"); + initMultipartUpload(writeClient, "vol1", "bucket1", "dip/key1"); - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1"); - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key2"); - initMultipartUpload(keyManager, "vol1", "bucket1", "key3"); + initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1"); + initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key2"); + initMultipartUpload(writeClient, "vol1", "bucket1", "key3"); - initMultipartUpload(keyManager, "vol1", "bucket2", "dir/key1"); + initMultipartUpload(writeClient, "vol1", "bucket2", "dir/key1"); //WHEN OmMultipartUploadList omMultipartUploadList = @@ -277,7 +282,7 @@ public void listMultipartUploadsWithPrefix() throws IOException { Assert.assertEquals("dir/key2", uploads.get(1).getKeyName()); } - private void createBucket(OmMetadataManagerImpl omMetadataManager, + private void createBucket(OMMetadataManager omMetadataManager, String volume, String bucket) throws IOException { OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() @@ -287,10 +292,10 @@ private void createBucket(OmMetadataManagerImpl omMetadataManager, .setIsVersionEnabled(false) .setAcls(new ArrayList<>()) .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, omBucketInfo); + OMRequestTestUtils.addBucketToOM(omMetadataManager, omBucketInfo); } - private OmMultipartInfo initMultipartUpload(KeyManagerImpl omtest, + private OmMultipartInfo 
initMultipartUpload(OzoneManagerProtocol omtest, String volume, String bucket, String key) throws IOException { OmKeyArgs key1 = new Builder() @@ -301,7 +306,8 @@ private OmMultipartInfo initMultipartUpload(KeyManagerImpl omtest, new RatisReplicationConfig(ReplicationFactor.THREE)) .setAcls(new ArrayList<>()) .build(); - return omtest.initiateMultipartUpload(key1); + OmMultipartInfo omMultipartInfo = omtest.initiateMultipartUpload(key1); + return omMultipartInfo; } private OmMultipartInfo addinitMultipartUploadToCache( @@ -376,13 +382,13 @@ public void testLookupFileWithDnFailure() throws IOException { .setAdminName("admin") .setOwnerName("admin") .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); + OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); final OmBucketInfo bucketInfo = OmBucketInfo.newBuilder() .setVolumeName("volumeOne") .setBucketName("bucketOne") .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); final OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder() .setBlockID(new BlockID(1L, 1L)) @@ -405,7 +411,7 @@ public void testLookupFileWithDnFailure() throws IOException { new RatisReplicationConfig(ReplicationFactor.THREE)) .setAcls(Collections.emptyList()) .build(); - TestOMRequestUtils.addKeyToOM(metadataManager, keyInfo); + OMRequestTestUtils.addKeyToOM(metadataManager, keyInfo); final OmKeyArgs.Builder keyArgs = new OmKeyArgs.Builder() .setVolumeName("volumeOne") @@ -438,10 +444,10 @@ public void listStatus() throws Exception { String keyPrefix = "key"; String client = "client.host"; - TestOMRequestUtils.addVolumeToDB(volume, OzoneConsts.OZONE, + OMRequestTestUtils.addVolumeToDB(volume, OzoneConsts.OZONE, metadataManager); - TestOMRequestUtils.addBucketToDB(volume, bucket, metadataManager); + OMRequestTestUtils.addBucketToDB(volume, bucket, metadataManager); final Pipeline pipeline = MockPipeline.createPipeline(3); final List nodes = pipeline.getNodes().stream() @@ -478,7 +484,7 @@ public void listStatus() throws Exception { .setUpdateID(i) .build(); keyInfo.appendNewBlocks(singletonList(keyLocationInfo), false); - TestOMRequestUtils.addKeyToOM(metadataManager, keyInfo); + OMRequestTestUtils.addKeyToOM(metadataManager, keyInfo); } when(containerClient.getContainerWithPipelineBatch(containerIDs)) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java index 73e9ea57ae2..277969c5e9d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMDBDefinition.java @@ -59,7 +59,7 @@ public void testDBDefinition() throws Exception { missingOmDBTables.remove("default"); int countOmDBTables = missingOmDBTables.size(); // Remove the file if it is found in both the datastructures - for(DBColumnFamilyDefinition definition : columnFamilyDefinitions) { + for (DBColumnFamilyDefinition definition : columnFamilyDefinitions) { if (!missingOmDBTables.remove(definition.getName())) { missingDBDefTables.add(definition.getName()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java index dbe5497ac1d..7354a940baa 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmMetadataManager.java @@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -105,8 +105,8 @@ public void testListVolumes() throws Exception { .setVolume(volName) .build(); - TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); - TestOMRequestUtils.addUserToDB(volName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); + OMRequestTestUtils.addUserToDB(volName, ownerName, omMetadataManager); } // Test list volumes with setting startVolume that @@ -133,16 +133,16 @@ public void testListAllVolumes() throws Exception { volName = "vola" + i; OmVolumeArgs omVolumeArgs = argsBuilder. setOwnerName(ownerName).setVolume(volName).build(); - TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); - TestOMRequestUtils.addUserToDB(volName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); + OMRequestTestUtils.addUserToDB(volName, ownerName, omMetadataManager); } for (int i = 0; i < 50; i++) { ownerName = "owner" + i; volName = "volb" + i; OmVolumeArgs omVolumeArgs = argsBuilder. setOwnerName(ownerName).setVolume(volName).build(); - TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); - TestOMRequestUtils.addUserToDB(volName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); + OMRequestTestUtils.addUserToDB(volName, ownerName, omMetadataManager); } String prefix = ""; @@ -177,7 +177,7 @@ public void testListBuckets() throws Exception { String prefixBucketNameWithOzoneOwner = "ozoneBucket"; String prefixBucketNameWithHadoopOwner = "hadoopBucket"; - TestOMRequestUtils.addVolumeToDB(volumeName1, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName1, omMetadataManager); TreeSet volumeABucketsPrefixWithOzoneOwner = new TreeSet<>(); @@ -201,7 +201,7 @@ public void testListBuckets() throws Exception { String volumeName2 = "volumeB"; TreeSet volumeBBucketsPrefixWithOzoneOwner = new TreeSet<>(); TreeSet volumeBBucketsPrefixWithHadoopOwner = new TreeSet<>(); - TestOMRequestUtils.addVolumeToDB(volumeName2, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName2, omMetadataManager); // Add exact name in prefixBucketNameWithOzoneOwner without postfix. volumeBBucketsPrefixWithOzoneOwner.add(prefixBucketNameWithOzoneOwner); @@ -278,7 +278,7 @@ public void testListBuckets() throws Exception { // volumeB with prefixBucketNameWithHadoopOwner. startBucket = null; TreeSet expectedBuckets = new TreeSet<>(); - for (int i=0; i<5; i++) { + for (int i = 0; i < 5; i++) { omBucketInfoList = omMetadataManager.listBuckets(volumeName2, startBucket, prefixBucketNameWithHadoopOwner, 10); @@ -329,8 +329,8 @@ public void testListKeys() throws Exception { String ozoneTestBucket = "ozoneBucket-Test"; // Create volumes and buckets. 
- TestOMRequestUtils.addVolumeToDB(volumeNameA, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeNameB, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeNameA, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeNameB, omMetadataManager); addBucketsToCache(volumeNameA, ozoneBucket); addBucketsToCache(volumeNameB, hadoopBucket); addBucketsToCache(volumeNameA, ozoneTestBucket); @@ -341,7 +341,7 @@ public void testListKeys() throws Exception { TreeSet keysASet = new TreeSet<>(); TreeSet keysBSet = new TreeSet<>(); TreeSet keysCSet = new TreeSet<>(); - for (int i=1; i<= 100; i++) { + for (int i = 1; i <= 100; i++) { if (i % 2 == 0) { keysASet.add( prefixKeyA + i); @@ -357,7 +357,7 @@ public void testListKeys() throws Exception { TreeSet keysAVolumeBSet = new TreeSet<>(); TreeSet keysBVolumeBSet = new TreeSet<>(); - for (int i=1; i<= 100; i++) { + for (int i = 1; i <= 100; i++) { if (i % 2 == 0) { keysAVolumeBSet.add( prefixKeyA + i); @@ -422,7 +422,7 @@ public void testListKeys() throws Exception { // volumeB/ozoneBucket with "key-a". startKey = null; TreeSet expectedKeys = new TreeSet<>(); - for (int i=0; i<5; i++) { + for (int i = 0; i < 5; i++) { omKeyInfoList = omMetadataManager.listKeys(volumeNameB, hadoopBucket, startKey, prefixKeyB, 10); @@ -463,7 +463,7 @@ public void testListKeysWithFewDeleteEntriesInCache() throws Exception { String ozoneBucket = "ozoneBucket"; // Create volumes and bucket. - TestOMRequestUtils.addVolumeToDB(volumeNameA, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeNameA, omMetadataManager); addBucketsToCache(volumeNameA, ozoneBucket); @@ -472,7 +472,7 @@ public void testListKeysWithFewDeleteEntriesInCache() throws Exception { TreeSet deleteKeySet = new TreeSet<>(); - for (int i=1; i<= 100; i++) { + for (int i = 1; i <= 100; i++) { if (i % 2 == 0) { keysASet.add( prefixKeyA + i); @@ -510,7 +510,7 @@ public void testListKeysWithFewDeleteEntriesInCache() throws Exception { // Now get key count by 10. String startKey = null; expectedKeys = new TreeSet<>(); - for (int i=0; i<5; i++) { + for (int i = 0; i < 5; i++) { omKeyInfoList = omMetadataManager.listKeys(volumeNameA, ozoneBucket, startKey, prefixKeyA, 10); @@ -565,11 +565,11 @@ public void testGetExpiredOpenKeys() throws Exception { // cache, since they will be picked up once the cache is flushed. Set expiredKeys = new HashSet<>(); for (int i = 0; i < numExpiredOpenKeys; i++) { - OmKeyInfo keyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, "expired" + i, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, 0L, expiredAgeMillis); - TestOMRequestUtils.addKeyToTable(true, false, + OMRequestTestUtils.addKeyToTable(true, false, keyInfo, clientID, 0L, omMetadataManager); String groupID = omMetadataManager.getOpenKey(volumeName, bucketName, @@ -579,11 +579,11 @@ public void testGetExpiredOpenKeys() throws Exception { // Add unexpired keys to open key table. 
for (int i = 0; i < numUnexpiredOpenKeys; i++) { - OmKeyInfo keyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, "unexpired" + i, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); - TestOMRequestUtils.addKeyToTable(true, false, + OMRequestTestUtils.addKeyToTable(true, false, keyInfo, clientID, 0L, omMetadataManager); } @@ -618,12 +618,12 @@ public void testGetExpiredOpenKeys() throws Exception { private void addKeysToOM(String volumeName, String bucketName, String keyName, int i) throws Exception { - if (i%2== 0) { - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + if (i % 2 == 0) { + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, 1000L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); } else { - TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java index 0413036686c..5804a9a39cc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java @@ -29,7 +29,10 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.security.authentication.client.AuthenticationException; +import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Rule; @@ -40,7 +43,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; -import java.util.UUID; /** * Test Key Trash Service. 
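// A minimal sketch (not part of the patch) of the OmTestManagers-based
// setup/teardown that the tests in this patch switch to, replacing direct
// KeyManagerImpl construction. The constructor, getters and teardown call are
// the ones used in this patch; the field names and the bare OzoneConfiguration
// are only illustrative. Fragment of a JUnit 4 test class.
private OzoneManager om;
private KeyManager keyManager;
private OzoneManagerProtocol writeClient;

@Before
public void setup() throws Exception {
  OzoneConfiguration conf = new OzoneConfiguration();
  OmTestManagers managers = new OmTestManagers(conf);
  om = managers.getOzoneManager();          // fully wired OM for the test
  keyManager = managers.getKeyManager();    // read-side queries stay on KeyManager
  writeClient = managers.getWriteClient();  // writes go through OzoneManagerProtocol
}

@After
public void cleanup() throws Exception {
  om.stop(); // also stops the background services started with the OM
}

// Key writes and deletes then go through the write client, e.g.:
//   OpenKeySession session = writeClient.openKey(keyArgs);
//   writeClient.commitKey(keyArgs, session.getId());
//   writeClient.deleteKey(keyArgs);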
@@ -57,12 +59,13 @@ public class TestTrashService { public TemporaryFolder tempFolder = new TemporaryFolder(); private KeyManager keyManager; - private OmMetadataManagerImpl omMetadataManager; + private OzoneManagerProtocol writeClient; + private OzoneManager om; private String volumeName; private String bucketName; @Before - public void setup() throws IOException { + public void setup() throws IOException, AuthenticationException { OzoneConfiguration configuration = new OzoneConfiguration(); File folder = tempFolder.newFolder(); @@ -72,38 +75,41 @@ public void setup() throws IOException { System.setProperty(DBConfigFromFile.CONFIG_DIR, "/"); ServerUtils.setOzoneMetaDirPath(configuration, folder.toString()); - omMetadataManager = new OmMetadataManagerImpl(configuration); - - keyManager = new KeyManagerImpl( - new ScmBlockLocationTestingClient(null, null, 0), - omMetadataManager, configuration, UUID.randomUUID().toString(), null); - keyManager.start(configuration); - + OmTestManagers omTestManagers + = new OmTestManagers(configuration); + keyManager = omTestManagers.getKeyManager(); + writeClient = omTestManagers.getWriteClient(); + om = omTestManagers.getOzoneManager(); volumeName = "volume"; bucketName = "bucket"; } + @After + public void cleanup() throws Exception { + om.stop(); + } + @Test public void testRecoverTrash() throws IOException { String keyName = "testKey"; String destinationBucket = "destBucket"; createAndDeleteKey(keyName); - boolean recoverOperation = omMetadataManager + boolean recoverOperation = keyManager.getMetadataManager() .recoverTrash(volumeName, bucketName, keyName, destinationBucket); Assert.assertTrue(recoverOperation); } private void createAndDeleteKey(String keyName) throws IOException { - TestOMRequestUtils.addVolumeToOM(keyManager.getMetadataManager(), + OMRequestTestUtils.addVolumeToOM(keyManager.getMetadataManager(), OmVolumeArgs.newBuilder() .setOwnerName("owner") .setAdminName("admin") .setVolume(volumeName) .build()); - TestOMRequestUtils.addBucketToOM(keyManager.getMetadataManager(), + OMRequestTestUtils.addBucketToOM(keyManager.getMetadataManager(), OmBucketInfo.newBuilder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -120,9 +126,9 @@ private void createAndDeleteKey(String keyName) throws IOException { .build(); /* Create and delete key in the Key Manager. 
*/ - OpenKeySession session = keyManager.openKey(keyArgs); - keyManager.commitKey(keyArgs, session.getId()); - keyManager.deleteKey(keyArgs); + OpenKeySession session = writeClient.openKey(keyArgs); + writeClient.commitKey(keyArgs, session.getId()); + writeClient.deleteKey(keyArgs); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java index 7d7c310f685..01601668b61 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java @@ -109,7 +109,7 @@ private MockOzoneManagerProtocol(String nodeId, Exception ex) { public OMResponse submitRequest(RpcController controller, OzoneManagerProtocolProtos.OMRequest request) throws ServiceException { throw new ServiceException("ServiceException of type " + - exception.getClass() + " for "+ omNodeId, exception); + exception.getClass() + " for " + omNodeId, exception); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java index 0d7f95b94f7..16dc3228358 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithDummyResponse.java @@ -109,7 +109,7 @@ public void testDoubleBufferWithDummyResponse() throws Exception { assertEquals(0, metrics.getTotalNumOfFlushedTransactions()); assertEquals(0, metrics.getMaxNumberOfTransactionsFlushedInOneIteration()); - for (int i=0; i < bucketCount; i++) { + for (int i = 0; i < bucketCount; i++) { doubleBuffer.add(createDummyBucketResponse(volumeName), trxId.incrementAndGet()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index 3dc3b97fec3..92d5c6292f4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -33,7 +33,7 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest; import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest; import org.apache.hadoop.ozone.om.request.volume.OMVolumeCreateRequest; @@ -284,7 +284,7 @@ public void testDoubleBufferWithMixOfTransactionsParallel() throws Exception { private void doMixTransactions(String volumeName, int bucketCount, Queue deleteBucketQueue, Queue bucketQueue) { - for (int i=0; i < bucketCount; i++) { + for (int i = 0; i < bucketCount; i++) { String bucketName = UUID.randomUUID().toString(); long transactionID = 
trxId.incrementAndGet(); OMBucketCreateResponse omBucketCreateResponse = createBucket(volumeName, @@ -304,7 +304,7 @@ private void doMixTransactions(String volumeName, int bucketCount, private OMClientResponse deleteBucket(String volumeName, String bucketName, long transactionID) { OzoneManagerProtocolProtos.OMRequest omRequest = - TestOMRequestUtils.createDeleteBucketRequest(volumeName, bucketName); + OMRequestTestUtils.createDeleteBucketRequest(volumeName, bucketName); OMBucketDeleteRequest omBucketDeleteRequest = new OMBucketDeleteRequest(omRequest); @@ -434,7 +434,7 @@ private boolean assertRowCount(int expected, Table table) { private void doTransactions(int bucketCount) { String volumeName = UUID.randomUUID().toString(); createVolume(volumeName, trxId.incrementAndGet()); - for (int i=0; i< bucketCount; i++) { + for (int i = 0; i < bucketCount; i++) { createBucket(volumeName, UUID.randomUUID().toString(), trxId.incrementAndGet()); } @@ -450,7 +450,7 @@ private OMClientResponse createVolume(String volumeName, String admin = OzoneConsts.OZONE; String owner = UUID.randomUUID().toString(); OzoneManagerProtocolProtos.OMRequest omRequest = - TestOMRequestUtils.createVolumeRequest(volumeName, admin, owner); + OMRequestTestUtils.createVolumeRequest(volumeName, admin, owner); OMVolumeCreateRequest omVolumeCreateRequest = new OMVolumeCreateRequest(omRequest); @@ -467,7 +467,7 @@ private OMBucketCreateResponse createBucket(String volumeName, String bucketName, long transactionID) { OzoneManagerProtocolProtos.OMRequest omRequest = - TestOMRequestUtils.createBucketRequest(bucketName, volumeName, false, + OMRequestTestUtils.createBucketRequest(bucketName, volumeName, false, OzoneManagerProtocolProtos.StorageTypeProto.DISK); OMBucketCreateRequest omBucketCreateRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java index 2b2b75af240..351f524c1da 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java @@ -185,7 +185,7 @@ public void testIsReadOnlyCapturesAllCmdTypeEnums() throws Exception { OmUtils.isReadOnly(request); assertFalse(cmdtype + " is not categorized in " + "OmUtils#isReadyOnly", - logCapturer.getOutput().contains("CmdType " + cmdtype +" is not " + + logCapturer.getOutput().contains("CmdType " + cmdtype + " is not " + "categorized as readOnly or not.")); logCapturer.clearOutput(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java index b73bbc5d6f8..a0a7cd83807 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java @@ -303,7 +303,7 @@ public void testPreAppendTransaction() throws Exception { mockTransactionContext(createKeyRequest)); Assert.fail("Expected StateMachineException to be thrown when " + "submitting write request while prepared."); - } catch(StateMachineException smEx) { + } catch (StateMachineException smEx) { Assert.assertFalse(smEx.leaderShouldStepDown()); Throwable 
cause = smEx.getCause(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java similarity index 99% rename from hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java rename to hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java index bec1587411b..204b4aaa433 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/OMRequestTestUtils.java @@ -81,9 +81,9 @@ /** * Helper class to test OMClientRequest classes. */ -public final class TestOMRequestUtils { +public final class OMRequestTestUtils { - private TestOMRequestUtils() { + private OMRequestTestUtils() { //Do nothing } @@ -1042,7 +1042,7 @@ public static long getBucketId(String volumeName, String bucketName, public static long addParentsToDirTable(String volumeName, String bucketName, String key, OMMetadataManager omMetaMgr) throws Exception { - long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName, + long bucketId = OMRequestTestUtils.getBucketId(volumeName, bucketName, omMetaMgr); if (org.apache.commons.lang3.StringUtils.isBlank(key)) { return bucketId; @@ -1053,9 +1053,9 @@ public static long addParentsToDirTable(String volumeName, String bucketName, long txnID = 50; for (String pathElement : pathComponents) { OmDirectoryInfo omDirInfo = - TestOMRequestUtils.createOmDirectoryInfo(pathElement, ++objectId, + OMRequestTestUtils.createOmDirectoryInfo(pathElement, ++objectId, parentId); - TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, + OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo, txnID, omMetaMgr); parentId = omDirInfo.getObjectID(); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java index 3f6bc154685..c31bf009334 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMClientRequestWithUserInfo.java @@ -94,7 +94,7 @@ public void testUserInfo() throws Exception { String bucketName = UUID.randomUUID().toString(); String volumeName = UUID.randomUUID().toString(); OzoneManagerProtocolProtos.OMRequest omRequest = - TestOMRequestUtils.createBucketRequest(bucketName, volumeName, true, + OMRequestTestUtils.createBucketRequest(bucketName, volumeName, true, OzoneManagerProtocolProtos.StorageTypeProto.DISK); OMBucketCreateRequest omBucketCreateRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java index 4ccf4206339..83b34006a7b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequest.java @@ -35,7 +35,7 @@ .StorageTypeProto; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import 
org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.Time; @@ -74,7 +74,7 @@ public void testValidateAndUpdateCacheWithNoVolume() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - OMRequest originalRequest = TestOMRequestUtils.createBucketRequest( + OMRequest originalRequest = OMRequestTestUtils.createBucketRequest( bucketName, volumeName, false, StorageTypeProto.SSD); OMBucketCreateRequest omBucketCreateRequest = @@ -129,7 +129,7 @@ private OMBucketCreateRequest doPreExecute(String volumeName, String bucketName) throws Exception { addCreateVolumeToTable(volumeName, omMetadataManager); OMRequest originalRequest = - TestOMRequestUtils.createBucketRequest(bucketName, volumeName, false, + OMRequestTestUtils.createBucketRequest(bucketName, volumeName, false, StorageTypeProto.SSD); OMBucketCreateRequest omBucketCreateRequest = @@ -217,6 +217,6 @@ public static void addCreateVolumeToTable(String volumeName, OmVolumeArgs.newBuilder().setCreationTime(Time.now()) .setVolume(volumeName).setAdminName(UUID.randomUUID().toString()) .setOwnerName(UUID.randomUUID().toString()).build(); - TestOMRequestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); + OMRequestTestUtils.addVolumeToOM(omMetadataManager, omVolumeArgs); } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java index 28ba8defa18..f3dd34ca07a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketCreateRequestWithFSO.java @@ -21,7 +21,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto; @@ -56,7 +56,7 @@ private OMBucketCreateRequest doPreExecute(String volumeName, String bucketName) throws Exception { addCreateVolumeToTable(volumeName, omMetadataManager); OMRequest originalRequest = - TestOMRequestUtils.createBucketReqFSO(bucketName, volumeName, + OMRequestTestUtils.createBucketReqFSO(bucketName, volumeName, false, StorageTypeProto.SSD); OMBucketCreateRequest omBucketCreateRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java index 1037baa8eaf..090d3fd12c8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketDeleteRequest.java 
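// A minimal sketch (not part of the patch) of the renamed helper class in use
// (TestOMRequestUtils -> OMRequestTestUtils). The calls and their parameters
// are the ones appearing in these request tests; the generated names are
// arbitrary and omMetadataManager is the test's metadata manager field.
String volumeName = UUID.randomUUID().toString();
String bucketName = UUID.randomUUID().toString();

// Seed volume and bucket entries directly in the OM metadata store so the
// request under test finds an existing bucket.
OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
    omMetadataManager);

// Build a CreateBucket OMRequest to feed into the request class under test.
OMRequest originalRequest = OMRequestTestUtils.createBucketRequest(
    bucketName, volumeName, false, StorageTypeProto.SSD);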
@@ -21,9 +21,9 @@ import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -61,7 +61,7 @@ public void testValidateAndUpdateCache() throws Exception { new OMBucketDeleteRequest(omRequest); // Create Volume and bucket entries in DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, 1, @@ -93,7 +93,7 @@ public void testValidateAndUpdateCacheFailure() throws Exception { Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND, omClientResponse.getOMResponse().getStatus()); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java index c2a18acedbb..243adfe0c49 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java @@ -22,11 +22,11 @@ import java.util.UUID; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos. 
@@ -80,7 +80,7 @@ public void testValidateAndUpdateCache() throws Exception { bucketName, true, Long.MAX_VALUE); // Create with default BucketInfo values - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMBucketSetPropertyRequest omBucketSetPropertyRequest = @@ -142,9 +142,9 @@ public void testValidateAndUpdateCacheWithQuota() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeToDB( + OMRequestTestUtils.addVolumeToDB( volumeName, omMetadataManager, 10 * GB); - TestOMRequestUtils.addBucketToDB( + OMRequestTestUtils.addBucketToDB( volumeName, bucketName, omMetadataManager, 8 * GB); OMRequest omRequest = createSetBucketPropertyRequest(volumeName, bucketName, true, 20 * GB); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketAddAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketAddAclRequest.java index c4d4cc2d8a7..3c3e55e1e08 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketAddAclRequest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.bucket.acl; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.bucket.TestBucketRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -42,7 +42,7 @@ public void testPreExecute() throws Exception { String bucketName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:testUser:rw"); - OMRequest originalRequest = TestOMRequestUtils + OMRequest originalRequest = OMRequestTestUtils .createBucketAddAclRequest(volumeName, bucketName, acl); long originModTime = originalRequest.getAddAclRequest() .getModificationTime(); @@ -66,13 +66,13 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String bucketName = UUID.randomUUID().toString(); String ownerName = "testUser"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OzoneAcl acl = OzoneAcl.parseAcl("user:newUser:rw"); - OMRequest originalRequest = TestOMRequestUtils. + OMRequest originalRequest = OMRequestTestUtils. 
createBucketAddAclRequest(volumeName, bucketName, acl); OMBucketAddAclRequest omBucketAddAclRequest = new OMBucketAddAclRequest(originalRequest); @@ -101,7 +101,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:newUser:rw"); - OMRequest originalRequest = TestOMRequestUtils + OMRequest originalRequest = OMRequestTestUtils .createBucketAddAclRequest(volumeName, bucketName, acl); OMBucketAddAclRequest omBucketAddAclRequest = new OMBucketAddAclRequest(originalRequest); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketRemoveAclRequest.java index eca281ca977..f39c052cd18 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketRemoveAclRequest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.bucket.acl; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.bucket.TestBucketRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -41,7 +41,7 @@ public void testPreExecute() throws Exception { String bucketName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:testUser:rw"); - OMRequest originalRequest = TestOMRequestUtils + OMRequest originalRequest = OMRequestTestUtils .createBucketRemoveAclRequest(volumeName, bucketName, acl); long originModTime = originalRequest.getRemoveAclRequest() .getModificationTime(); @@ -65,14 +65,14 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String bucketName = UUID.randomUUID().toString(); String ownerName = "testUser"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OzoneAcl acl = OzoneAcl.parseAcl("user:newUser:rw"); // Add acl - OMRequest addAclRequest = TestOMRequestUtils + OMRequest addAclRequest = OMRequestTestUtils .createBucketAddAclRequest(volumeName, bucketName, acl); OMBucketAddAclRequest omBucketAddAclRequest = new OMBucketAddAclRequest(addAclRequest); @@ -93,7 +93,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { Assert.assertEquals(acl, bucketAcls.get(0)); // Remove acl. 
- OMRequest removeAclRequest = TestOMRequestUtils + OMRequest removeAclRequest = OMRequestTestUtils .createBucketRemoveAclRequest(volumeName, bucketName, acl); OMBucketRemoveAclRequest omBucketRemoveAclRequest = new OMBucketRemoveAclRequest(removeAclRequest); @@ -118,7 +118,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:newUser:rw"); - OMRequest originalRequest = TestOMRequestUtils + OMRequest originalRequest = OMRequestTestUtils .createBucketRemoveAclRequest(volumeName, bucketName, acl); OMBucketRemoveAclRequest omBucketRemoveAclRequest = new OMBucketRemoveAclRequest(originalRequest); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketSetAclRequest.java index 519d1ddbd85..53a9d80917a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/acl/TestOMBucketSetAclRequest.java @@ -20,7 +20,7 @@ import com.google.common.collect.Lists; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.bucket.TestBucketRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -42,7 +42,7 @@ public void testPreExecute() throws Exception { String bucketName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:testUser:rw"); - OMRequest originalRequest = TestOMRequestUtils + OMRequest originalRequest = OMRequestTestUtils .createBucketSetAclRequest(volumeName, bucketName, Lists.newArrayList(acl)); long originModTime = originalRequest.getSetAclRequest() @@ -67,15 +67,15 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String bucketName = UUID.randomUUID().toString(); String ownerName = "owner"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OzoneAcl userAcl = OzoneAcl.parseAcl("user:newUser:rw"); OzoneAcl groupAcl = OzoneAcl.parseAcl("group:newGroup:rw"); List acls = Lists.newArrayList(userAcl, groupAcl); - OMRequest originalRequest = TestOMRequestUtils + OMRequest originalRequest = OMRequestTestUtils .createBucketSetAclRequest(volumeName, bucketName, acls); OMBucketSetAclRequest omBucketSetAclRequest = new OMBucketSetAclRequest(originalRequest); @@ -106,7 +106,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:newUser:rw"); - OMRequest originalRequest = TestOMRequestUtils + OMRequest originalRequest = OMRequestTestUtils .createBucketSetAclRequest(volumeName, bucketName, Lists.newArrayList(acl)); OMBucketSetAclRequest omBucketSetAclRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index 66dddb7a9f6..9519eb3b327 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -46,7 +46,7 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .CreateDirectoryRequest; @@ -108,7 +108,7 @@ public void testPreExecute() throws Exception { String bucketName = "bucket1"; String keyName = "a/b/c"; - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, @@ -132,7 +132,7 @@ public void testValidateAndUpdateCache() throws Exception { String keyName = genRandomKeyName(); // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, @@ -201,7 +201,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { omDirectoryCreateRequest.preExecute(ozoneManager); omDirectoryCreateRequest = new OMDirectoryCreateRequest(modifiedOmRequest); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); OMClientResponse omClientResponse = omDirectoryCreateRequest.validateAndUpdateCache(ozoneManager, 100L, @@ -224,10 +224,10 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() String keyName = genRandomKeyName(); // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName.substring(0, 12), 1L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, @@ -266,10 +266,10 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists() String keyName = genRandomKeyName(); // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName), 1L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); @@ -310,10 +310,10 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { String keyName = genRandomKeyName(); // Add volume and bucket entries to DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); // Add a key with first two levels. - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName.substring(0, 11), 1L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, @@ -348,7 +348,7 @@ public void testCreateDirectoryOMMetric() String keyName = genRandomKeyName(); // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, @@ -396,7 +396,7 @@ private OMRequest createDirectoryRequest(String volumeName, String bucketName, private String genRandomKeyName() { StringBuilder keyNameBuilder = new StringBuilder(); keyNameBuilder.append(RandomStringUtils.randomAlphabetic(5)); - for (int i =0; i< 3; i++) { + for (int i = 0; i < 3; i++) { keyNameBuilder.append("/").append(RandomStringUtils.randomAlphabetic(5)); } return keyNameBuilder.toString(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java index 36a4c912539..beea90f909f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestWithFSO.java @@ -41,7 +41,7 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; import org.apache.hadoop.ozone.om.request.OMClientRequest; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest; @@ -90,7 +90,7 @@ public void setup() throws Exception { OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, folder.newFolder().getAbsolutePath()); - TestOMRequestUtils.configureFSOptimizedPaths(ozoneConfiguration, true); + OMRequestTestUtils.configureFSOptimizedPaths(ozoneConfiguration, true); omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration); when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); @@ -114,7 +114,7 @@ public void testPreExecute() throws Exception { String bucketName = "bucket1"; String keyName = "a/b/c"; - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, @@ -138,7 +138,7 @@ public void testValidateAndUpdateCache() throws Exception { String keyName = createDirKey(dirs, 3); // Add volume and bucket entries to DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); @@ -218,7 +218,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { omDirCreateReqFSO = new OMDirectoryCreateRequestWithFSO(modifiedOmReq, BucketLayout.FILE_SYSTEM_OPTIMIZED); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); OMClientResponse omClientResponse = omDirCreateReqFSO.validateAndUpdateCache(ozoneManager, 100L, @@ -241,7 +241,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() String keyName = createDirKey(dirs, 3); // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); @@ -252,14 +252,14 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() //1. Create root OmDirectoryInfo omDirInfo = - TestOMRequestUtils.createOmDirectoryInfo(dirs.get(0), objID++, + OMRequestTestUtils.createOmDirectoryInfo(dirs.get(0), objID++, bucketID); - TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000, + OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo, 5000, omMetadataManager); //2. Create sub-directory under root - omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(dirs.get(1), objID++, + omDirInfo = OMRequestTestUtils.createOmDirectoryInfo(dirs.get(1), objID++, omDirInfo.getObjectID()); - TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000, + OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo, 5000, omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, @@ -293,7 +293,7 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists() String keyName = createDirKey(dirs, 3); // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); @@ -309,9 +309,9 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists() long objID = 100 + indx; long txnID = 5000 + indx; // for index=0, parentID is bucketID - OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo( + OmDirectoryInfo omDirInfo = OMRequestTestUtils.createOmDirectoryInfo( dirs.get(indx), objID, parentID); - TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo, + OMRequestTestUtils.addDirKeyToDirTable(false, omDirInfo, txnID, omMetadataManager); parentID = omDirInfo.getObjectID(); @@ -356,7 +356,7 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { String keyName = createDirKey(dirs, 3); // Add volume and bucket entries to DB. 
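// A minimal sketch (not part of the patch) of how the "WithFSO" tests above
// seed the directory table: each OmDirectoryInfo is created with its parent's
// object ID, so nested paths are built one level at a time. The helper
// signatures are the ones used in this patch; the names, object IDs and the
// transaction ID are arbitrary.
long bucketId = 100L;
OmDirectoryInfo root =
    OMRequestTestUtils.createOmDirectoryInfo("a", 101L, bucketId);
OMRequestTestUtils.addDirKeyToDirTable(true, root, 5000, omMetadataManager);

OmDirectoryInfo child =
    OMRequestTestUtils.createOmDirectoryInfo("b", 102L, root.getObjectID());
OMRequestTestUtils.addDirKeyToDirTable(true, child, 5000, omMetadataManager);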
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = @@ -369,9 +369,9 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { long objID = 100 + indx; long txnID = 5000 + indx; // for index=0, parentID is bucketID - OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo( + OmDirectoryInfo omDirInfo = OMRequestTestUtils.createOmDirectoryInfo( dirs.get(indx), objID, parentID); - TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo, + OMRequestTestUtils.addDirKeyToDirTable(false, omDirInfo, txnID, omMetadataManager); parentID = omDirInfo.getObjectID(); @@ -381,7 +381,7 @@ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { long txnID = 50000; // Add a file into the FileTable, this is to simulate "file exists" check. - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, objID++); String ozoneFileName = parentID + "/" + dirs.get(dirs.size() - 1); @@ -437,7 +437,7 @@ public void testValidateAndUpdateCacheWithFileExistsInGivenPath() String keyName = createDirKey(dirs, 3); // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = @@ -448,14 +448,14 @@ public void testValidateAndUpdateCacheWithFileExistsInGivenPath() long txnID = 5000; // for index=0, parentID is bucketID - OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo( + OmDirectoryInfo omDirInfo = OMRequestTestUtils.createOmDirectoryInfo( dirs.get(0), objID++, parentID); - TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, + OMRequestTestUtils.addDirKeyToDirTable(true, omDirInfo, txnID, omMetadataManager); parentID = omDirInfo.getObjectID(); // Add a key in second level. - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, objID); @@ -506,7 +506,7 @@ public void testCreateDirectoryUptoLimitOfMaxLevels255() throws Exception { String keyName = createDirKey(dirs, 255); // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = @@ -545,7 +545,7 @@ public void testCreateDirectoryExceedLimitOfMaxLevels255() throws Exception { String keyName = createDirKey(dirs, 256); // Add volume and bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, @@ -581,7 +581,7 @@ public void testCreateDirectoryOMMetric() throws Exception { String keyName = createDirKey(dirs, 3); // Add volume and bucket entries to DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index e28dd6cb501..5c93dae0caa 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.jetbrains.annotations.NotNull; import org.junit.Assert; import org.junit.Test; @@ -28,7 +29,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -51,7 +51,7 @@ public class TestOMFileCreateRequest extends TestOMKeyRequest { @Test - public void testPreExecute() throws Exception{ + public void testPreExecute() throws Exception { OMRequest omRequest = createFileRequest(volumeName, bucketName, keyName, HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, false, false); @@ -91,7 +91,7 @@ public void testPreExecute() throws Exception{ } @Test - public void testPreExecuteWithBlankKey() throws Exception{ + public void testPreExecuteWithBlankKey() throws Exception { OMRequest omRequest = createFileRequest(volumeName, bucketName, "", HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, false, false); @@ -118,7 +118,7 @@ public void testValidateAndUpdateCache() throws Exception { HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, false, true); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest); @@ -192,7 +192,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, false, true); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest); OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager); @@ -212,16 +212,16 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { testNonRecursivePath("a/b", false, false, true); // Create some child keys for the path - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, "a/b/c/", 0L, 
HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, "a/b/", 0L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, "a/", 0L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); @@ -243,12 +243,12 @@ public void testValidateAndUpdateCacheWithRecursive() throws Exception { // Should be able to create file even if parent directories does not // exist and key already exist, as this is with overwrite enabled. testNonRecursivePath(UUID.randomUUID().toString(), false, false, false); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, "c/d/e/f", 0L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); testNonRecursivePath("c/d/e/f", true, true, false); // Create some child keys for the path - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, "a/b/c/d", 0L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); testNonRecursivePath("a/b/c", false, true, false); @@ -263,7 +263,7 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite() testNonRecursivePath(key, false, true, false); // Add the key to key table - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, key, 0L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); @@ -280,19 +280,19 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() String key = "c/d/e/f"; // Need to add the path which starts with "c/d/e" to keyTable as this is // non-recursive parent should exist. 
- TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, "c/", 0L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, "c/d/", 0L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, "c/d/e/", 0L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); testNonRecursivePath(key, false, false, false); // Add the key to key table - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, key, 0L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); @@ -308,7 +308,7 @@ protected void testNonRecursivePath(String key, HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS, overWrite, recursive); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest); @@ -404,7 +404,7 @@ protected OMRequest createFileRequest( * @return OMFileCreateRequest reference */ @NotNull - protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest){ + protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) { return new OMFileCreateRequest(omRequest); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java index a3b02fecd91..eb7e995a261 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestWithFSO.java @@ -22,7 +22,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; @@ -43,10 +43,10 @@ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception { Assert.assertEquals("Invalid metrics value", 0, omMetrics.getNumKeys()); // Create parent dirs for the path - TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName, + OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, "a/b/c", omMetadataManager); String fileNameD = "d"; - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); @@ -76,12 +76,12 @@ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite() // Add the key to key table OmDirectoryInfo omDirInfo = getDirInfo("c/d/e"); OmKeyInfo omKeyInfo = - 
TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omDirInfo.getObjectID() + 10, omDirInfo.getObjectID(), 100, Time.now()); - TestOMRequestUtils.addFileToKeyTable(false, false, + OMRequestTestUtils.addFileToKeyTable(false, false, "f", omKeyInfo, -1, omDirInfo.getObjectID() + 10, omMetadataManager); @@ -97,10 +97,10 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() String parentDir = "c/d/e"; String fileName = "f"; String key = parentDir + "/" + fileName; - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); // Create parent dirs for the path - long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName, + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, parentDir, omMetadataManager); // Need to add the path which starts with "c/d/e" to OpenKeyTable as this is @@ -108,12 +108,12 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() testNonRecursivePath(key, false, false, false); OmKeyInfo omKeyInfo = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, Time.now()); - TestOMRequestUtils.addFileToKeyTable(false, false, + OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); // Even if key exists in KeyTable, should be able to create file as @@ -126,7 +126,7 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite() protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id, boolean doAssert) throws Exception { - long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName, + long bucketId = OMRequestTestUtils.getBucketId(volumeName, bucketName, omMetadataManager); String[] pathComponents = StringUtils.split(key, '/'); long parentId = bucketId; @@ -160,7 +160,7 @@ protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id, private OmDirectoryInfo getDirInfo(String key) throws Exception { - long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName, + long bucketId = OMRequestTestUtils.getBucketId(volumeName, bucketName, omMetadataManager); String[] pathComponents = StringUtils.split(key, '/'); long parentId = bucketId; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java index 21209c4057f..6f8bfd015b1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java @@ -25,13 +25,13 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.jetbrains.annotations.NotNull; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import 
org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -56,7 +56,7 @@ public void testPreExecute() throws Exception { @Test public void testValidateAndUpdateCache() throws Exception { // Add volume, bucket, key entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); addKeyToOpenKeyTable(volumeName, bucketName); @@ -94,7 +94,7 @@ public void testValidateAndUpdateCache() throws Exception { Assert.assertEquals(modifiedOmRequest.getAllocateBlockRequest() .getKeyArgs().getModificationTime(), omKeyInfo.getModificationTime()); - // creationTime was assigned at TestOMRequestUtils.addKeyToTable + // creationTime was assigned at OMRequestTestUtils.addKeyToTable // modificationTime was assigned at // doPreExecute(createAllocateBlockRequest()) Assert.assertTrue( @@ -153,7 +153,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { // Added only volume to DB. - TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, + OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, omMetadataManager); OMClientResponse omAllocateBlockResponse = @@ -175,7 +175,7 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { getOmAllocateBlockRequest(modifiedOmRequest); // Add volume, bucket entries to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, omAllocateBlockRequest.getBucketLayout()); @@ -254,7 +254,7 @@ protected OMRequest createAllocateBlockRequest() { protected String addKeyToOpenKeyTable(String volumeName, String bucketName) throws Exception { - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); return ""; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java index 6482a4fd646..6dec84d79ce 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequestWithFSO.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; @@ -58,19 +58,19 @@ protected String addKeyToOpenKeyTable(String volumeName, String bucketName) keyName = parentDir + OzoneConsts.OM_KEY_PREFIX + fileName; // add parentDir to dirTable - long parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, + long parentID = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, parentDir, omMetadataManager); long txnId = 50; long objectId = parentID + 1; OmKeyInfo 
omKeyInfoFSO = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, Time.now()); // add key to openFileTable - TestOMRequestUtils.addFileToKeyTable(true, false, + OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager); return omMetadataManager.getOzonePathKey(parentID, fileName); @@ -92,7 +92,7 @@ public BucketLayout getBucketLayout() { @Override protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id, boolean doAssert) throws Exception { - long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName, + long bucketId = OMRequestTestUtils.getBucketId(volumeName, bucketName, omMetadataManager); String[] pathComponents = StringUtils.split(key, '/'); long parentId = bucketId; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java index cda42a1b194..df6804818cb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequest.java @@ -21,7 +21,7 @@ import java.util.UUID; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAclRequest; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequest; @@ -46,7 +46,7 @@ public class TestOMKeyAclRequest extends TestOMKeyRequest { @Test public void testKeyAddAclRequest() throws Exception { // Manually add volume, bucket and key to DB - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(); @@ -83,7 +83,7 @@ public void testKeyAddAclRequest() throws Exception { @Test public void testKeyRemoveAclRequest() throws Exception { - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(); @@ -147,7 +147,7 @@ public void testKeyRemoveAclRequest() throws Exception { @Test public void testKeySetAclRequest() throws Exception { - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(); @@ -248,7 +248,7 @@ private OMRequest createSetAclKeyRequest(OzoneAcl acl) { } protected String addKeyToTable() throws Exception { - TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, 1L, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java index 9f49d9d071e..d528926f7ad 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyAclRequestWithFSO.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAclRequest; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequestWithFSO; import org.apache.hadoop.ozone.om.request.key.acl.OMKeyRemoveAclRequestWithFSO; @@ -40,15 +40,15 @@ protected String addKeyToTable() throws Exception { keyName = key; // updated key name // Create parent dirs for the path - long parentId = TestOMRequestUtils + long parentId = OMRequestTestUtils .addParentsToDirTable(volumeName, bucketName, parentDir, omMetadataManager); - OmKeyInfo omKeyInfo = TestOMRequestUtils + OmKeyInfo omKeyInfo = OMRequestTestUtils .createOmKeyInfo(volumeName, bucketName, key, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, Time.now()); - TestOMRequestUtils + OMRequestTestUtils .addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); return omKeyInfo.getPath(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java index e36ff2fb2ce..d4122c0d49b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.util.Time; import org.jetbrains.annotations.NotNull; import org.junit.Assert; @@ -36,7 +37,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -74,7 +74,7 @@ public void testValidateAndUpdateCacheWithUnknownBlockId() throws Exception { .stream().map(OmKeyLocationInfo::getFromProtobuf) .collect(Collectors.toList()); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, omKeyCommitRequest.getBucketLayout()); String ozoneKey = addKeyToOpenKeyTable(allocatedLocationList); @@ -136,7 +136,7 @@ public void testValidateAndUpdateCache() throws Exception { .map(OmKeyLocationInfo::getFromProtobuf) .collect(Collectors.toList()); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, 
bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, omKeyCommitRequest.getBucketLayout()); String ozoneKey = addKeyToOpenKeyTable(allocatedLocationList); @@ -239,7 +239,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { OMKeyCommitRequest omKeyCommitRequest = getOmKeyCommitRequest(modifiedOmRequest); - TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, + OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, omMetadataManager); String ozoneKey = getOzonePathKey(); @@ -274,7 +274,7 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { OMKeyCommitRequest omKeyCommitRequest = getOmKeyCommitRequest(modifiedOmRequest); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, omKeyCommitRequest.getBucketLayout()); String ozoneKey = getOzonePathKey(); @@ -445,12 +445,12 @@ private OMRequest createCommitKeyRequest() { private List getKeyLocation(int count) { List keyLocations = new ArrayList<>(); - for (int i=0; i < count; i++) { + for (int i = 0; i < count; i++) { KeyLocation keyLocation = KeyLocation.newBuilder() .setBlockID(HddsProtos.BlockID.newBuilder() .setContainerBlockID(HddsProtos.ContainerBlockID.newBuilder() - .setContainerID(i+1000).setLocalID(i+100).build())) + .setContainerID(i + 1000).setLocalID(i + 100).build())) .setOffset(0).setLength(200).setCreateVersion(version).build(); keyLocations.add(keyLocation); } @@ -470,7 +470,7 @@ protected String getOzonePathKey() throws IOException { @NotNull protected String addKeyToOpenKeyTable(List locationList) throws Exception { - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager, locationList, version); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java index b57d402ae68..f64250a9b43 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestWithFSO.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.Time; import org.jetbrains.annotations.NotNull; @@ -45,7 +45,7 @@ private long getBucketID() throws java.io.IOException { String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey); - if(omBucketInfo!= null){ + if (omBucketInfo != null) { return omBucketInfo.getObjectID(); } // bucket doesn't exists in DB @@ -66,20 +66,20 @@ protected String addKeyToOpenKeyTable(List locationList) if (getParentDir() == null) { parentID = getBucketID(); } else { - parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, + parentID = 
OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, getParentDir(), omMetadataManager); } long objectId = 100; OmKeyInfo omKeyInfoFSO = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100, Time.now(), version); omKeyInfoFSO.appendNewBlocks(locationList, false); String fileName = OzoneFSUtils.getFileName(keyName); - TestOMRequestUtils.addFileToKeyTable(true, false, + OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, clientID, txnLogId, omMetadataManager); return omMetadataManager.getOzonePathKey(parentID, fileName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index c5caf95cfed..5bc9d454847 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -28,12 +28,12 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -46,7 +46,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.RATIS; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS; -import static org.apache.hadoop.ozone.om.request.TestOMRequestUtils.addVolumeAndBucketToDB; +import static org.apache.hadoop.ozone.om.request.OMRequestTestUtils.addVolumeAndBucketToDB; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE; import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK; import static org.junit.Assert.fail; @@ -292,7 +292,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { String openKey = getOpenKey(id); - TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, + OMRequestTestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE, omMetadataManager); // Before calling @@ -498,7 +498,7 @@ public void testKeyCreateWithFileSystemPathsEnabled() throws Exception { } protected void addToKeyTable(String keyName) throws Exception { - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName.substring(1), 0L, RATIS, THREE, omMetadataManager); } @@ -574,7 +574,7 @@ protected void checkCreatedPaths(OMKeyCreateRequest omKeyCreateRequest, protected long checkIntermediatePaths(Path keyPath) throws Exception { // Check intermediate paths are created keyPath = keyPath.getParent(); - while(keyPath != null) { + while (keyPath != null) { 
Assert.assertNotNull(omMetadataManager.getKeyTable(getBucketLayout()).get( omMetadataManager .getOzoneDirKey(volumeName, bucketName, keyPath.toString()))); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java index 354ece092b2..2ddc5c71f91 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequestWithFSO.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.Time; import org.junit.Assert; @@ -56,12 +56,12 @@ protected void addToKeyTable(String keyName) throws Exception { long parentId = checkIntermediatePaths(keyPath); String fileName = OzoneFSUtils.getFileName(keyName); OmKeyInfo omKeyInfo = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, fileName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, fileName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, Time.now()); - TestOMRequestUtils.addFileToKeyTable(false, false, + OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java index 34d9beacbbc..03f246753d4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java @@ -21,11 +21,11 @@ import java.util.UUID; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -48,7 +48,7 @@ public void testPreExecute() throws Exception { @Test public void testValidateAndUpdateCache() throws Exception { // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); String ozoneKey = addKeyToTable(); @@ -89,7 +89,7 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { // Add only volume and bucket entry to DB. // In actual implementation we don't check for bucket/volume exists // during delete key. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, getBucketLayout()); OMClientResponse omClientResponse = @@ -125,7 +125,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { OMKeyDeleteRequest omKeyDeleteRequest = getOmKeyDeleteRequest(modifiedOmRequest); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); OMClientResponse omClientResponse = omKeyDeleteRequest .validateAndUpdateCache(ozoneManager, 100L, @@ -172,7 +172,7 @@ private OMRequest createDeleteKeyRequest() { } protected String addKeyToTable() throws Exception { - TestOMRequestUtils.addKeyToTable(false, volumeName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java index b63e0646fa3..24da4c3dd6e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java @@ -24,7 +24,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.security.acl.OzonePrefixPath; import org.apache.hadoop.util.Time; @@ -60,17 +60,17 @@ protected String addKeyToTable() throws Exception { keyName = key; // updated key name // Create parent dirs for the path - long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName, + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, parentDir, omMetadataManager); OmKeyInfo omKeyInfo = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, key, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, Time.now()); omKeyInfo.setKeyName(fileName); - TestOMRequestUtils.addFileToKeyTable(false, false, + OMRequestTestUtils.addFileToKeyTable(false, false, fileName, omKeyInfo, -1, 50, omMetadataManager); return omKeyInfo.getPath(); } @@ -78,7 +78,7 @@ protected String addKeyToTable() throws Exception { @Test public void testOzonePrefixPathViewer() throws Exception { // Add volume, bucket and key entries to OM DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED); String ozoneKey = addKeyToTable(); @@ -125,10 +125,10 @@ private void verifyPath(OzonePrefixPath ozonePrefixPath, String pathName, pathName); Assert.assertTrue("Failed to list keyPaths", pathItr.hasNext()); Assert.assertEquals(expectedPath, pathItr.next().getTrimmedName()); - try{ + try { pathItr.next(); Assert.fail("Reached end of the list!"); - } catch (NoSuchElementException nse){ + } catch (NoSuchElementException nse) { // expected } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java index c3e2d036dc4..4b2b46794f1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyPurgeRequestAndResponse.java @@ -23,10 +23,10 @@ import java.util.List; import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.key.OMKeyPurgeResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -54,13 +54,13 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) bucket = bucketName; } // Add volume, bucket and key entries to OM DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucket, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucket, omMetadataManager); List ozoneKeyNames = new ArrayList<>(numKeys); for (int i = 1; i <= numKeys; i++) { String key = keyName + "-" + i; - TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucket, + OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucket, key, clientID, replicationType, replicationFactor, trxnIndex++, omMetadataManager); ozoneKeyNames.add(omMetadataManager.getOzoneKey( @@ -69,7 +69,7 @@ private List createAndDeleteKeys(Integer trxnIndex, String bucket) List deletedKeyNames = new ArrayList<>(numKeys); for (String ozoneKey : ozoneKeyNames) { - String deletedKeyName = TestOMRequestUtils.deleteKey( + String deletedKeyName = OMRequestTestUtils.deleteKey( ozoneKey, omMetadataManager, trxnIndex++); deletedKeyNames.add(deletedKeyName); } @@ -137,7 +137,7 @@ public void testValidateAndUpdateCache() throws Exception { .setStatus(Status.OK) .build(); - try(BatchOperation batchOperation = + try (BatchOperation batchOperation = omMetadataManager.getStore().initBatchOperation()) { OMKeyPurgeResponse omKeyPurgeResponse = new OMKeyPurgeResponse( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java index 69b2421f9cc..d229d8363b1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRenameRequest.java @@ -19,11 +19,12 @@ package org.apache.hadoop.ozone.om.request.key; import java.util.UUID; + +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -49,9 +50,9 @@ public void testValidateAndUpdateCache() throws Exception { OMRequest modifiedOmRequest = doPreExecute(createRenameKeyRequest(toKeyName)); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); OMKeyRenameRequest omKeyRenameRequest = @@ -97,7 +98,7 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception { // In actual implementation we don't check for bucket/volume exists // during delete key. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMKeyRenameRequest omKeyRenameRequest = @@ -135,7 +136,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { doPreExecute(createRenameKeyRequest(toKeyName)); // Add only volume entry to DB. 
- TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); OMKeyRenameRequest omKeyRenameRequest = new OMKeyRenameRequest(modifiedOmRequest); @@ -158,7 +159,7 @@ public void testValidateAndUpdateCacheWithToKeyInvalid() throws Exception { // In actual implementation we don't check for bucket/volume exists // during delete key. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMKeyRenameRequest omKeyRenameRequest = @@ -183,7 +184,7 @@ public void testValidateAndUpdateCacheWithFromKeyInvalid() throws Exception { // In actual implementation we don't check for bucket/volume exists // during delete key. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMKeyRenameRequest omKeyRenameRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java index 312e521f27c..8d079eb5693 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysDeleteRequest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.key; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeyArgs; @@ -123,7 +123,7 @@ private void createPreRequisites() throws Exception { deleteKeyList = new ArrayList<>(); // Add volume, bucket and key entries to OM DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); int count = 10; @@ -138,7 +138,7 @@ private void createPreRequisites() throws Exception { for (int i = 0; i < count; i++) { key = parentDir.concat("/key" + i); - TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, + OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, omMetadataManager); deleteKeyArgs.addKeys(key); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java index f69f6615a1f..24fd138fdfe 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -116,7 +116,7 @@ public void testKeysRenameRequestFail() throws Exception { private OMRequest createRenameKeyRequest(Boolean isIllegal) throws Exception { // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); List renameKeyList = new ArrayList<>(); @@ -124,7 +124,7 @@ private OMRequest createRenameKeyRequest(Boolean isIllegal) throws Exception { for (int i = 0; i < count; i++) { String key = parentDir.concat("/key" + i); String toKey = parentDir.concat("/newKey" + i); - TestOMRequestUtils.addKeyToTableCache(volumeName, bucketName, + OMRequestTestUtils.addKeyToTableCache(volumeName, bucketName, parentDir.concat("/key" + i), HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java index 29c4285cdd6..958a6a0f988 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java @@ -28,13 +28,12 @@ import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.response.key.OMOpenKeysDeleteRequest; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.junit.Assert; import org.junit.Test; import com.google.common.base.Optional; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; 
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .Status; @@ -225,14 +224,14 @@ private void addToOpenKeyTableDB(long keySize, OpenKeyBucket... openKeys) for (OpenKey openKey: openKeyBucket.getKeysList()) { if (keySize > 0) { - OmKeyInfo keyInfo = TestOMRequestUtils.createOmKeyInfo(volume, bucket, + OmKeyInfo keyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, openKey.getName(), replicationType, replicationFactor); - TestOMRequestUtils.addKeyLocationInfo(keyInfo, 0, keySize); + OMRequestTestUtils.addKeyLocationInfo(keyInfo, 0, keySize); - TestOMRequestUtils.addKeyToTable(true, false, + OMRequestTestUtils.addKeyToTable(true, false, keyInfo, openKey.getClientID(), 0L, omMetadataManager); } else { - TestOMRequestUtils.addKeyToTable(true, + OMRequestTestUtils.addKeyToTable(true, volume, bucket, openKey.getName(), openKey.getClientID(), replicationType, replicationFactor, omMetadataManager); } @@ -338,7 +337,7 @@ private void assertNotInOpenKeyTable(OpenKeyBucket... openKeys) private List getFullOpenKeyNames(OpenKeyBucket... openKeyBuckets) { List fullKeyNames = new ArrayList<>(); - for(OpenKeyBucket keysPerBucket: openKeyBuckets) { + for (OpenKeyBucket keysPerBucket: openKeyBuckets) { String volume = keysPerBucket.getVolumeName(); String bucket = keysPerBucket.getBucketName(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java index 5690ff20cb6..08063f3a71f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMPrefixAclRequest.java @@ -21,7 +21,7 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.PrefixManager; import org.apache.hadoop.ozone.om.PrefixManagerImpl; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.key.acl.prefix.OMPrefixAddAclRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -46,9 +46,9 @@ public void testAclRequest() throws Exception { when(ozoneManager.getPrefixManager()).thenReturn(prefixManager); // Manually add volume, bucket and key to DB - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); - TestOMRequestUtils.addKeyToTable(false, false, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(false, false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, 1L, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java index b7094985f17..78a2346aeb9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java @@ -21,10 +21,10 @@ import java.util.UUID; +import 
org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -49,7 +49,7 @@ public void testValidateAndUpdateCache() throws Exception { String keyName = UUID.randomUUID().toString(); // Add volume and bucket to DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, @@ -100,7 +100,7 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); OMRequest modifiedRequest = doPreExecuteInitiateMPU( volumeName, bucketName, keyName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java index 38496c4a653..1f6b08c3cd5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java @@ -24,7 +24,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; @@ -55,7 +55,7 @@ public void testValidateAndUpdateCache() throws Exception { String keyName = prefix + fileName; // Add volume and bucket to DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java index a89a5bdd184..581bd2ac569 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartRequest.java @@ -46,7 +46,7 @@ .OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Part; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.when; @@ -112,7 +112,7 @@ public void stop() { protected OMRequest doPreExecuteInitiateMPU( String volumeName, String bucketName, String keyName) throws Exception { OMRequest omRequest = - TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName, + OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName, keyName); S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = @@ -150,7 +150,7 @@ protected OMRequest doPreExecuteCommitMPU( // Just set dummy size long dataSize = 100L; OMRequest omRequest = - TestOMRequestUtils.createCommitPartMPURequest(volumeName, bucketName, + OMRequestTestUtils.createCommitPartMPURequest(volumeName, bucketName, keyName, clientID, dataSize, multipartUploadID, partNumber); S3MultipartUploadCommitPartRequest s3MultipartUploadCommitPartRequest = getS3MultipartUploadCommitReq(omRequest); @@ -179,7 +179,7 @@ protected OMRequest doPreExecuteAbortMPU( String multipartUploadID) throws IOException { OMRequest omRequest = - TestOMRequestUtils.createAbortMPURequest(volumeName, bucketName, + OMRequestTestUtils.createAbortMPURequest(volumeName, bucketName, keyName, multipartUploadID); @@ -201,7 +201,7 @@ protected OMRequest doPreExecuteCompleteMPU(String volumeName, List partList) throws IOException { OMRequest omRequest = - TestOMRequestUtils.createCompleteMPURequest(volumeName, bucketName, + OMRequestTestUtils.createCompleteMPURequest(volumeName, bucketName, keyName, multipartUploadID, partList); S3MultipartUploadCompleteRequest s3MultipartUploadCompleteRequest = @@ -229,7 +229,7 @@ protected OMRequest doPreExecuteCompleteMPU(String volumeName, protected OMRequest doPreExecuteInitiateMPUWithFSO( String volumeName, String bucketName, String keyName) throws Exception { OMRequest omRequest = - TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName, + OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName, keyName); S3InitiateMultipartUploadRequestWithFSO diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java index 1afbb5fc0ee..85afa626519 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequest.java @@ -21,10 +21,10 @@ import java.io.IOException; import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -52,7 +52,7 @@ public void testValidateAndUpdateCache() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -104,7 +104,7 @@ public void testValidateAndUpdateCacheMultipartNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); String multipartUploadID = "randomMPU"; @@ -162,7 +162,7 @@ public void testValidateAndUpdateCacheBucketNotFound() throws Exception { String keyName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); String multipartUploadID = "randomMPU"; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java index dc768753f65..440830c1bcc 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadAbortRequestWithFSO.java @@ -20,7 +20,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import java.util.UUID; @@ -58,7 +58,7 @@ protected String getKeyName() { protected void createParentPath(String volumeName, String bucketName) throws Exception { // Create parent dirs for the path - parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName, + parentID = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, dirName, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java index f4d7f4ade89..a017764ef35 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java 
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequest.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -54,7 +54,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -116,7 +116,7 @@ public void testValidateAndUpdateCacheMultipartNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -155,7 +155,7 @@ public void testValidateAndUpdateCacheKeyNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); @@ -193,7 +193,7 @@ public void testValidateAndUpdateCacheBucketFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); long clientID = Time.now(); @@ -220,7 +220,7 @@ public void testValidateAndUpdateCacheBucketFound() throws Exception { protected void addKeyToOpenKeyTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, clientID, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java index f9ae9d2d9d2..7de016ad1df 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCommitPartRequestWithFSO.java @@ -24,7 +24,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.Time; import org.junit.Assert; @@ -64,12 
+64,12 @@ protected String getKeyName() { protected void addKeyToOpenKeyTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { long txnLogId = 10000; - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentID + 1, parentID, txnLogId, Time.now()); String fileName = OzoneFSUtils.getFileName(keyName); - TestOMRequestUtils.addFileToKeyTable(true, false, + OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfo, clientID, txnLogId, omMetadataManager); } @@ -92,7 +92,7 @@ protected String getOpenKey(String volumeName, String bucketName, protected OMRequest doPreExecuteInitiateMPU(String volumeName, String bucketName, String keyName) throws Exception { OMRequest omRequest = - TestOMRequestUtils.createInitiateMPURequest(volumeName, bucketName, + OMRequestTestUtils.createInitiateMPURequest(volumeName, bucketName, keyName); S3InitiateMultipartUploadRequest s3InitiateMultipartUploadRequest = @@ -116,7 +116,7 @@ protected OMRequest doPreExecuteInitiateMPU(String volumeName, protected void createParentPath(String volumeName, String bucketName) throws Exception { // Create parent dirs for the path - parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName, + parentID = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, dirName, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java index 3993597ec13..2e12aa735a8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequest.java @@ -24,11 +24,11 @@ import java.util.UUID; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -122,7 +122,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { protected void addVolumeAndBucket(String volumeName, String bucketName) throws Exception { - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, BucketLayout.DEFAULT); } @@ -132,7 +132,7 @@ public void testInvalidPartOrderError() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); OMRequest initiateMPURequest = doPreExecuteInitiateMPU(volumeName, @@ -163,7 +163,7 @@ public void testInvalidPartOrderError() throws Exception { List partList = new ArrayList<>(); - String partName= getPartName(volumeName, 
bucketName, keyName, + String partName = getPartName(volumeName, bucketName, keyName, multipartUploadID, 23); partList.add(Part.newBuilder().setPartName(partName).setPartNumber(23) @@ -217,7 +217,7 @@ public void testValidateAndUpdateCacheBucketNotFound() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); List partList = new ArrayList<>(); OMRequest completeMultipartRequest = doPreExecuteCompleteMPU(volumeName, @@ -242,7 +242,7 @@ public void testValidateAndUpdateCacheNoSuchMultipartUploadError() String bucketName = UUID.randomUUID().toString(); String keyName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); List partList = new ArrayList<>(); @@ -265,7 +265,7 @@ public void testValidateAndUpdateCacheNoSuchMultipartUploadError() protected void addKeyToTable(String volumeName, String bucketName, String keyName, long clientID) throws Exception { - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, + OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, clientID, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java index c6533ef9d48..f0a1dfb6826 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3MultipartUploadCompleteRequestWithFSO.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.util.Time; @@ -54,7 +54,7 @@ protected String getKeyName() { @Override protected void addVolumeAndBucket(String volumeName, String bucketName) throws Exception { - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED); } @@ -71,7 +71,7 @@ protected void addKeyToTable(String volumeName, String bucketName, long objectId = parentID + 1; OmKeyInfo omKeyInfoFSO = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, Time.now()); @@ -79,7 +79,7 @@ protected void addKeyToTable(String volumeName, String bucketName, // add key to openFileTable String fileName = OzoneFSUtils.getFileName(keyName); omKeyInfoFSO.setKeyName(fileName); - TestOMRequestUtils.addFileToKeyTable(true, false, + 
OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, clientID, omKeyInfoFSO.getObjectID(), omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/upgrade/TestOMCancelPrepareRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/upgrade/TestOMCancelPrepareRequest.java index 9cdbef35a4b..d00494f5b09 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/upgrade/TestOMCancelPrepareRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/upgrade/TestOMCancelPrepareRequest.java @@ -17,9 +17,9 @@ package org.apache.hadoop.ozone.om.request.upgrade; import org.apache.hadoop.ozone.om.OzoneManagerPrepareState; +import org.apache.hadoop.ozone.om.request.key.OMOpenKeysDeleteRequest; import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; -import org.apache.hadoop.ozone.om.response.key.OMOpenKeysDeleteRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java index aba72f28e09..a0b94d16261 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeCreateRequest.java @@ -29,7 +29,7 @@ import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -84,7 +84,7 @@ public void testValidateAndUpdateCacheWithZeroMaxUserVolumeCount() Assert.assertEquals(expectedObjId, respone.getOmVolumeArgs() .getObjectID()); Assert.assertEquals(txLogIndex, respone.getOmVolumeArgs().getUpdateID()); - } catch (IllegalArgumentException ex){ + } catch (IllegalArgumentException ex) { GenericTestUtils.assertExceptionContains("should be greater than zero", ex); } @@ -183,7 +183,7 @@ public void testValidateAndUpdateCacheWithVolumeAlreadyExists() String adminName = "user1"; String ownerName = "user1"; - TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, omMetadataManager); OMRequest originalRequest = createVolumeRequest(volumeName, adminName, ownerName); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java index 73a28882702..ac6191fdccb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeDeleteRequest.java @@ -20,11 +20,11 @@ 
import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -62,8 +62,8 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { omVolumeDeleteRequest.preExecute(ozoneManager); // Add volume and user to DB - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); String volumeKey = omMetadataManager.getVolumeKey(volumeName); String ownerKey = omMetadataManager.getUserKey(ownerName); @@ -127,11 +127,11 @@ public void testValidateAndUpdateCacheWithVolumeNotEmpty() throws Exception { OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName).build(); - TestOMRequestUtils.addBucketToOM(omMetadataManager, omBucketInfo); + OMRequestTestUtils.addBucketToOM(omMetadataManager, omBucketInfo); // Add user and volume to DB. - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); OMClientResponse omClientResponse = omVolumeDeleteRequest.validateAndUpdateCache(ozoneManager, 1L, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java index 9cd04f69d2d..de633ac4a08 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetOwnerRequest.java @@ -23,6 +23,7 @@ import java.util.Set; import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos; import org.junit.Assert; import org.junit.Test; @@ -30,7 +31,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos .OMRequest; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; /** @@ -43,7 +43,7 @@ public void testPreExecute() throws Exception { String volumeName = UUID.randomUUID().toString(); String newOwner = "user1"; OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner); + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, newOwner); OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = new OMVolumeSetQuotaRequest(originalRequest); @@ -59,13 +59,13 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String volumeName = UUID.randomUUID().toString(); String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, 
ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); String newOwner = "user2"; OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner); + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, newOwner); OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = new OMVolumeSetOwnerRequest(originalRequest); @@ -98,7 +98,12 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { .get(volumeKey).getCreationTime(); long modificationTime = omMetadataManager.getVolumeTable() .get(volumeKey).getModificationTime(); - Assert.assertTrue(modificationTime > creationTime); + + // creationTime and modificationTime can be the same to the precision of a + // millisecond - since there is no time-consuming operation between + // OMRequestTestUtils.addVolumeToDB (sets creationTime) and + // preExecute (sets modificationTime). + Assert.assertTrue(modificationTime >= creationTime); OzoneManagerStorageProtos.PersistedUserVolumeInfo newOwnerVolumeList = omMetadataManager.getUserTable().get(newOwnerKey); @@ -124,7 +129,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() String ownerName = "user1"; OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, ownerName); OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = @@ -150,7 +155,7 @@ public void testInvalidRequest() throws Exception { // create request with quota set. OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, 100L, 100L); OMVolumeSetOwnerRequest omVolumeSetOwnerRequest = @@ -174,13 +179,13 @@ public void testInvalidRequest() throws Exception { public void testOwnSameVolumeTwice() throws Exception { String volumeName = UUID.randomUUID().toString(); String owner = "user1"; - TestOMRequestUtils.addVolumeToDB(volumeName, owner, omMetadataManager); - TestOMRequestUtils.addUserToDB(volumeName, owner, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, owner, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, owner, omMetadataManager); String newOwner = "user2"; // Create request to set new owner OMRequest omRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, newOwner); + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, newOwner); OMVolumeSetOwnerRequest setOwnerRequest = new OMVolumeSetOwnerRequest(omRequest); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java index 3d63ae5a7e3..6539f481cb3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java @@ -22,7 +22,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import 
org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; @@ -44,7 +44,7 @@ public void testPreExecute() throws Exception { long quotaInBytes = 100L; long quotaInNamespace = 1000L; OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, quotaInBytes, quotaInNamespace); OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = @@ -62,11 +62,11 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { long quotaInBytes = 100L; long quotaInNamespace = 1000L; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, quotaInBytes, quotaInNamespace); OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = @@ -108,7 +108,12 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { .getVolumeTable().get(volumeKey).getCreationTime(); long modificationTime = omMetadataManager .getVolumeTable().get(volumeKey).getModificationTime(); - Assert.assertTrue(modificationTime > creationTime); + + // creationTime and modificationTime can be the same to the precision of a + // millisecond - since there is no time-consuming operation between + // OMRequestTestUtils.addVolumeToDB (sets creationTime) and + // preExecute (sets modificationTime). + Assert.assertTrue(modificationTime >= creationTime); } @Test @@ -116,10 +121,10 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { String volumeName = UUID.randomUUID().toString(); long quotaInBytes = 100L; - long quotaInNamespace= 100L; + long quotaInNamespace = 100L; OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, quotaInBytes, quotaInNamespace); OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = @@ -144,7 +149,7 @@ public void testInvalidRequest() throws Exception { // create request with owner set. OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, "user1"); // Creating OMVolumeSetQuotaRequest with SetProperty request set with owner. 
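The two hunks above (in TestOMVolumeSetOwnerRequest and TestOMVolumeSetQuotaRequest) relax the timestamp assertion from a strict '>' to '>=' for the reason spelled out in the added comment: both creationTime and modificationTime are millisecond-resolution timestamps, and nothing time-consuming runs between addVolumeToDB and preExecute, so the two values can be equal. A minimal standalone sketch of that flakiness, using plain System.currentTimeMillis() as a stand-in for Hadoop's Time.now():

    // Sketch only: shows why 'modificationTime > creationTime' is flaky when
    // both values come from a millisecond clock read back-to-back.
    public final class TimestampAssertionSketch {
      public static void main(String[] args) {
        long creationTime = System.currentTimeMillis();      // stands in for addVolumeToDB
        long modificationTime = System.currentTimeMillis();  // stands in for preExecute
        // On a fast run both calls typically return the same millisecond,
        // so only the relaxed '>=' comparison is reliably true.
        assert modificationTime >= creationTime;
        System.out.println(modificationTime > creationTime
            ? "clock advanced between the two calls"
            : "same millisecond; a strict '>' assertion would have failed");
      }
    }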
@@ -169,12 +174,12 @@ public void testValidateAndUpdateCacheWithQuota() throws Exception { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeToDB( + OMRequestTestUtils.addVolumeToDB( volumeName, omMetadataManager, 10 * GB); - TestOMRequestUtils.addBucketToDB( + OMRequestTestUtils.addBucketToDB( volumeName, bucketName, omMetadataManager, 8 * GB); OMRequest originalRequest = - TestOMRequestUtils.createSetVolumePropertyRequest(volumeName, + OMRequestTestUtils.createSetVolumePropertyRequest(volumeName, 5 * GB, 100L); OMVolumeSetQuotaRequest omVolumeSetQuotaRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java index 28a5ce17cdd..0822fd5293a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java @@ -20,7 +20,7 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -43,7 +43,7 @@ public void testPreExecute() throws Exception { String volumeName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeAddAclRequest(volumeName, acl); long originModTime = originalRequest.getAddAclRequest() .getModificationTime(); @@ -65,13 +65,13 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String volumeName = UUID.randomUUID().toString(); String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeAddAclRequest(volumeName, acl); OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest(originalRequest); @@ -109,7 +109,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() String volumeName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeAddAclRequest(volumeName, acl); OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest(originalRequest); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java index b6ef38178e8..cf13ec42968 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeRemoveAclRequest.java @@ -20,7 +20,7 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -42,7 +42,7 @@ public void testPreExecute() throws Exception { String volumeName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeRemoveAclRequest(volumeName, acl); long originModTime = originalRequest.getRemoveAclRequest() .getModificationTime(); @@ -65,13 +65,13 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String volumeName = UUID.randomUUID().toString(); String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rwdlncxy[ACCESS]"); // add acl first OMRequest addAclRequest = - TestOMRequestUtils.createVolumeAddAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeAddAclRequest(volumeName, acl); OMVolumeAddAclRequest omVolumeAddAclRequest = new OMVolumeAddAclRequest(addAclRequest); omVolumeAddAclRequest.preExecute(ozoneManager); @@ -85,7 +85,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { // remove acl OMRequest removeAclRequest = - TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeRemoveAclRequest(volumeName, acl); OMVolumeRemoveAclRequest omVolumeRemoveAclRequest = new OMVolumeRemoveAclRequest(removeAclRequest); omVolumeRemoveAclRequest.preExecute(ozoneManager); @@ -121,7 +121,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() String volumeName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeRemoveAclRequest(volumeName, acl); + OMRequestTestUtils.createVolumeRemoveAclRequest(volumeName, acl); OMVolumeRemoveAclRequest omVolumeRemoveAclRequest = new OMVolumeRemoveAclRequest(originalRequest); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java index 0bd052cc495..4ffb6ede58c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java @@ -21,7 +21,7 @@ import com.google.common.collect.Lists; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import 
org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.volume.TestOMVolumeRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; @@ -43,7 +43,7 @@ public void testPreExecute() throws Exception { String volumeName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeSetAclRequest(volumeName, + OMRequestTestUtils.createVolumeSetAclRequest(volumeName, Lists.newArrayList(acl)); long originModTime = originalRequest.getSetAclRequest() .getModificationTime(); @@ -66,8 +66,8 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { String volumeName = UUID.randomUUID().toString(); String ownerName = "user1"; - TestOMRequestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); - TestOMRequestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addUserToDB(volumeName, ownerName, omMetadataManager); + OMRequestTestUtils.addVolumeToDB(volumeName, ownerName, omMetadataManager); OzoneAcl userAccessAcl = OzoneAcl.parseAcl("user:bilbo:rw[ACCESS]"); OzoneAcl groupDefaultAcl = @@ -76,7 +76,7 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { List acls = Lists.newArrayList(userAccessAcl, groupDefaultAcl); OMRequest originalRequest = - TestOMRequestUtils.createVolumeSetAclRequest(volumeName, acls); + OMRequestTestUtils.createVolumeSetAclRequest(volumeName, acls); OMVolumeSetAclRequest omVolumeSetAclRequest = new OMVolumeSetAclRequest(originalRequest); @@ -117,7 +117,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() String volumeName = UUID.randomUUID().toString(); OzoneAcl acl = OzoneAcl.parseAcl("user:bilbo:rw"); OMRequest originalRequest = - TestOMRequestUtils.createVolumeSetAclRequest(volumeName, + OMRequestTestUtils.createVolumeSetAclRequest(volumeName, Lists.newArrayList(acl)); OMVolumeSetAclRequest omVolumeSetAclRequest = diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java index 230360bad7e..3c17c8da161 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/TestCleanupTableInfo.java @@ -217,7 +217,7 @@ private void assertCacheItemCounts( for (String tableName : om.getMetadataManager().listTableNames()) { if (!cleanup.contains(tableName)) { assertEquals( - "Cache item count of table " +tableName, + "Cache item count of table " + tableName, cacheItemCount.get(tableName).intValue(), Iterators.size( om.getMetadataManager().getTable(tableName).cacheIterator() @@ -289,7 +289,7 @@ private OMFileCreateRequest anOMFileCreateRequest() { return new OMFileCreateRequest(protoRequest); } - private OMKeyCreateRequest anOMKeyCreateRequest(){ + private OMKeyCreateRequest anOMKeyCreateRequest() { OMRequest protoRequest = mock(OMRequest.class); when(protoRequest.getCreateKeyRequest()).thenReturn(aKeyCreateRequest()); when(protoRequest.getCmdType()).thenReturn(Type.CreateKey); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java index 33d8c81a093..6703f4c55ea 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponse.java @@ -26,7 +26,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos @@ -75,7 +75,7 @@ public void testAddToDBBatch() throws Exception { String keyName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, OzoneFSUtils.addTrailingSlashIfNeeded(keyName), HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseWithFSO.java index 1562fd112a3..35064ba17a2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseWithFSO.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -64,7 +64,7 @@ public void testAddToDBBatch() throws Exception { long parentID = 100; OmDirectoryInfo omDirInfo = - TestOMRequestUtils.createOmDirectoryInfo(keyName, 500, parentID); + OMRequestTestUtils.createOmDirectoryInfo(keyName, 500, parentID); OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse( OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance()) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java index eeffe19c513..397e10ad6ec 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseWithFSO.java @@ -21,7 +21,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import 
org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse; import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -40,7 +40,7 @@ public class TestOMFileCreateResponseWithFSO extends TestOMKeyCreateResponse { @Override protected OmKeyInfo getOmKeyInfo() { Assert.assertNotNull(omBucketInfo); - return TestOMRequestUtils.createOmKeyInfo(volumeName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationType, replicationFactor, omBucketInfo.getObjectID() + 1, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java index 71dd7dbdbb8..8e4dbbf82d3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponse.java @@ -25,7 +25,7 @@ import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.AllocateBlockResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -99,7 +99,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { } protected OmKeyInfo createOmKeyInfo() throws Exception { - return TestOMRequestUtils.createOmKeyInfo(volumeName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java index bbbacaea073..83c92d508f2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMAllocateBlockResponseWithFSO.java @@ -23,7 +23,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Time; import org.jetbrains.annotations.NotNull; @@ -48,7 +48,7 @@ protected OmKeyInfo createOmKeyInfo() throws Exception { long objectId = parentID + 1; OmKeyInfo omKeyInfoFSO = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, 
Time.now()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java index 8709a07c6e2..de8a4bbc13f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java @@ -27,7 +27,7 @@ import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; /** @@ -79,7 +79,7 @@ public void testAddToDBBatch() throws Exception { @Test public void testAddToDBBatchNoOp() throws Exception { - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); omBucketInfo = OmBucketInfo.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) @@ -139,7 +139,7 @@ public void testAddToDBBatchOnOverwrite() throws Exception { @NotNull protected void addKeyToOpenKeyTable() throws Exception { - TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(true, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java index 5b67eecfe95..caed4a031eb 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseWithFSO.java @@ -23,7 +23,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.Time; import org.jetbrains.annotations.NotNull; @@ -48,7 +48,7 @@ protected OMKeyCommitResponse getOmKeyCommitResponse(OmKeyInfo omKeyInfo, @Override protected OmKeyInfo getOmKeyInfo() { Assert.assertNotNull(omBucketInfo); - return TestOMRequestUtils.createOmKeyInfo(volumeName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationType, replicationFactor, omBucketInfo.getObjectID() + 1, @@ -63,13 +63,13 @@ protected void addKeyToOpenKeyTable() throws Exception { long objectId = parentID + 10; OmKeyInfo omKeyInfoFSO = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100, Time.now()); String fileName = OzoneFSUtils.getFileName(keyName); - TestOMRequestUtils.addFileToKeyTable(true, false, + OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, 
clientID, txnLogId, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java index b508e152161..a7ec3c1d473 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponseWithFSO.java @@ -21,7 +21,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.util.Time; import org.jetbrains.annotations.NotNull; @@ -46,7 +46,7 @@ protected String getOpenKeyName() { @Override protected OmKeyInfo getOmKeyInfo() { Assert.assertNotNull(omBucketInfo); - return TestOMRequestUtils.createOmKeyInfo(volumeName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, omBucketInfo.getBucketName(), keyName, replicationType, replicationFactor, omBucketInfo.getObjectID() + 1, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java index 2a7e2ede379..c8a1f8bb32d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java @@ -24,12 +24,12 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import java.util.ArrayList; @@ -177,7 +177,7 @@ protected String addKeyToTable() throws Exception { String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); return ozoneKey; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java index f21e49fc192..44555659140 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponseWithFSO.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import 
org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.util.Time; import org.junit.Assert; @@ -41,20 +41,20 @@ protected OMKeyDeleteResponse getOmKeyDeleteResponse(OmKeyInfo omKeyInfo, @Override protected String addKeyToTable() throws Exception { // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); // Create parent dirs for the path - long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName, + long parentId = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, "", omMetadataManager); OmKeyInfo omKeyInfo = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentId + 1, parentId, 100, Time.now()); - TestOMRequestUtils.addFileToKeyTable(false, false, + OMRequestTestUtils.addFileToKeyTable(false, false, keyName, omKeyInfo, -1, 50, omMetadataManager); return omKeyInfo.getPath(); } @@ -62,7 +62,7 @@ protected String addKeyToTable() throws Exception { @Override protected OmKeyInfo getOmKeyInfo() { Assert.assertNotNull(getOmBucketInfo()); - return TestOMRequestUtils.createOmKeyInfo(volumeName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, getOmBucketInfo().getBucketName(), keyName, replicationType, replicationFactor, getOmBucketInfo().getObjectID() + 1, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java index 7f040036586..92b24486505 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyRenameResponse.java @@ -20,11 +20,11 @@ import java.util.UUID; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; /** @@ -34,7 +34,7 @@ public class TestOMKeyRenameResponse extends TestOMKeyResponse { @Test public void testAddToDBBatch() throws Exception { - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); OzoneManagerProtocolProtos.OMResponse omResponse = @@ -55,7 +55,7 @@ public void testAddToDBBatch() throws Exception { String ozoneToKey = omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); Assert.assertTrue( @@ -77,7 +77,7 @@ public void testAddToDBBatch() throws Exception { @Test public void testAddToDBBatchWithErrorResponse() throws Exception { - OmKeyInfo omKeyInfo = 
TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); OzoneManagerProtocolProtos.OMResponse omResponse = @@ -98,7 +98,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { String ozoneToKey = omMetadataManager.getOzoneKey(volumeName, bucketName, toKeyName); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); Assert.assertTrue( @@ -122,7 +122,7 @@ public void testAddToDBBatchWithErrorResponse() throws Exception { @Test public void testAddToDBBatchWithSameKeyName() throws Exception { - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); OzoneManagerProtocolProtos.OMResponse omResponse = @@ -140,7 +140,7 @@ public void testAddToDBBatchWithSameKeyName() throws Exception { String ozoneFromKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); - TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName, keyName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, clientID, replicationType, replicationFactor, omMetadataManager); Assert.assertTrue( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java index bdafb6841a6..b41c8680b3a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.jetbrains.annotations.NotNull; import org.junit.After; import org.junit.Before; @@ -88,7 +88,7 @@ protected String getOpenKeyName() { @NotNull protected OmKeyInfo getOmKeyInfo() { - return TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + return OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java index 9fe3c8202f5..0d6a6d90065 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java @@ -21,7 +21,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeleteKeysResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -57,7 +57,7 @@ private void createPreRequisities() throws Exception { String ozoneKey = ""; for (int i = 0; i < 10; i++) { keyName = parent.concat(key + i); - TestOMRequestUtils.addKeyToTable(false, volumeName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, keyName, 0L, RATIS, THREE, omMetadataManager); ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); omKeyInfoList diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java index 3aa92c0b309..d59ce9abae8 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java @@ -20,7 +20,7 @@ import org.apache.hadoop.ozone.om.helpers.OmRenameKeys; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.RenameKeysResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; @@ -61,7 +61,7 @@ public void testKeysRenameResponse() throws Exception { omMetadataManager.getStore().commitBatchOperation(batchOperation); // Add volume, bucket and key entries to OM DB. - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); for (int i = 0; i < count; i++) { @@ -112,14 +112,14 @@ public void testKeysRenameResponseFail() throws Exception { private void createPreRequisities() throws Exception { // Add volume, bucket and key entries to OM DB. 
- TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); Map formAndToKeyInfo = new HashMap<>(); for (int i = 0; i < count; i++) { String key = parentDir.concat("/key" + i); String toKey = parentDir.concat("/newKey" + i); - TestOMRequestUtils.addKeyToTable(false, volumeName, + OMRequestTestUtils.addKeyToTable(false, volumeName, bucketName, parentDir.concat("/key" + i), 0L, RATIS, THREE, omMetadataManager); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java index 56e372d4259..ebe2deeaf5f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java @@ -18,11 +18,11 @@ package org.apache.hadoop.ozone.om.response.key; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.junit.Assert; import org.junit.Test; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -164,11 +164,11 @@ private Map addOpenKeysToDB(String volume, int numKeys, String key = UUID.randomUUID().toString(); long clientID = random.nextLong(); - OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volume, + OmKeyInfo omKeyInfo = OMRequestTestUtils.createOmKeyInfo(volume, bucket, key, replicationType, replicationFactor); if (keyLength > 0) { - TestOMRequestUtils.addKeyLocationInfo(omKeyInfo, 0, keyLength); + OMRequestTestUtils.addKeyLocationInfo(omKeyInfo, 0, keyLength); } String openKey = omMetadataManager.getOpenKey(volume, bucket, @@ -177,7 +177,7 @@ private Map addOpenKeysToDB(String volume, int numKeys, // Add to the open key table DB, not cache. // In a real execution, the open key would have been removed from the // cache by the request, and it would only remain in the DB. 
- TestOMRequestUtils.addKeyToTable(true, false, omKeyInfo, + OMRequestTestUtils.addKeyToTable(true, false, omKeyInfo, clientID, 0L, omMetadataManager); Assert.assertTrue(omMetadataManager.getOpenKeyTable(getBucketLayout()) .isExist(openKey)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java index 6d640b36070..f2089457de6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponseWithFSO.java @@ -23,7 +23,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; import org.apache.hadoop.util.Time; @@ -50,7 +50,7 @@ public void testAddDBToBatch() throws Exception { String keyName = getKeyName(); String multipartUploadID = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -88,7 +88,7 @@ public void testAddDBToBatchWithParts() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -160,7 +160,7 @@ public void testWithMultipartUploadError() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); @@ -224,7 +224,7 @@ private String getKeyName() { private void createParentPath(String volumeName, String bucketName) throws Exception { // Create parent dirs for the path - parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName, + parentID = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, dirName, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java index b52d3e2b404..1ff17513cab 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponseWithFSO.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; import 
org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; import org.apache.hadoop.util.Time; @@ -54,7 +54,7 @@ public void testAddDBToBatch() throws Exception { String keyName = getKeyName(); String multipartUploadID = UUID.randomUUID().toString(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); long txnId = 50; @@ -81,14 +81,14 @@ public void testAddDBToBatch() throws Exception { clientId); String dbKey = omMetadataManager.getOzonePathKey(parentID, fileName); OmKeyInfo omKeyInfoFSO = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, objectId, parentID, txnId, Time.now()); // add key to openFileTable omKeyInfoFSO.setKeyName(fileName); - TestOMRequestUtils.addFileToKeyTable(true, false, + OMRequestTestUtils.addFileToKeyTable(true, false, fileName, omKeyInfoFSO, clientId, omKeyInfoFSO.getObjectID(), omMetadataManager); @@ -130,7 +130,7 @@ public void testAddDBToBatchWithParts() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); runAddDBToBatchWithParts(volumeName, bucketName, keyName, 0); @@ -148,12 +148,12 @@ public void testAddDBToBatchWithPartsWithKeyInDeleteTable() throws Exception { String bucketName = UUID.randomUUID().toString(); String keyName = getKeyName(); - TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager); createParentPath(volumeName, bucketName); // Put an entry to delete table with the same key prior to multipart commit - OmKeyInfo prevKey = TestOMRequestUtils.createOmKeyInfo(volumeName, + OmKeyInfo prevKey = OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, @@ -210,7 +210,7 @@ private long runAddDBToBatchWithParts(String volumeName, omMultipartKeyInfo, deleteEntryCount); OmKeyInfo omKeyInfo = - TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, + OMRequestTestUtils.createOmKeyInfo(volumeName, bucketName, keyName, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE, parentID + 9, @@ -310,7 +310,7 @@ private String getKeyName() { private void createParentPath(String volumeName, String bucketName) throws Exception { // Create parent dirs for the path - parentID = TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName, + parentID = OMRequestTestUtils.addParentsToDirTable(volumeName, bucketName, dirName, omMetadataManager); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java index 9699524aea2..acae362469d 100644 --- 
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java @@ -258,7 +258,7 @@ private Iterable mockFeatures(String... names) { private Iterable mockFeatures( int startFromLV, String... names ) { - int i=startFromLV; + int i = startFromLV; List ret = new ArrayList<>(); for (String name : names) { ret.add(mockFeature(name, i)); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java index a23469a4dfc..71621894c52 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOzoneManagerPrepareState.java @@ -203,7 +203,7 @@ private void writePrepareMarkerFile(byte[] bytes) throws IOException { if (!mkdirs) { throw new IOException("Unable to create marker file directory."); } - try(FileOutputStream stream = + try (FileOutputStream stream = new FileOutputStream(markerFile)) { stream.write(bytes); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java index 670b7ee3afd..e39fe39930c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneDelegationTokenSecretManager.java @@ -91,7 +91,7 @@ public void setUp() throws Exception { s3SecretManager = new S3SecretManagerImpl(conf, metadataManager) { @Override public S3SecretValue getS3Secret(String kerberosID) { - if(s3Secrets.containsKey(kerberosID)) { + if (s3Secrets.containsKey(kerberosID)) { return new S3SecretValue(kerberosID, s3Secrets.get(kerberosID)); } return null; @@ -99,7 +99,7 @@ public S3SecretValue getS3Secret(String kerberosID) { @Override public String getS3UserSecretString(String awsAccessKey) { - if(s3Secrets.containsKey(awsAccessKey)) { + if (s3Secrets.containsKey(awsAccessKey)) { return s3Secrets.get(awsAccessKey); } return null; @@ -322,7 +322,7 @@ public void testVerifySignatureFailure() throws Exception { OzoneTokenIdentifier id = new OzoneTokenIdentifier(); // set invalid om cert serial id id.setOmCertSerialId("1927393"); - id.setMaxDate(Time.now() + 60*60*24); + id.setMaxDate(Time.now() + 60 * 60 * 24); id.setOwner(new Text("test")); Assert.assertFalse(secretManager.verifySignature(id, id.getBytes())); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java index 48ed205a587..bb815720857 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java @@ -218,7 +218,7 @@ public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException, } long duration = Time.monotonicNowNanos() - startTime; LOG.info("Average token sign time with HmacSha256(RSA/1024 key) is {} ns", - 
duration/testTokenCount); + duration / testTokenCount); startTime = Time.monotonicNowNanos(); for (int i = 0; i < testTokenCount; i++) { @@ -226,7 +226,7 @@ public void testAsymmetricTokenPerf() throws NoSuchAlgorithmException, } duration = Time.monotonicNowNanos() - startTime; LOG.info("Average token verify time with HmacSha256(RSA/1024 key) " - + "is {} ns", duration/testTokenCount); + + "is {} ns", duration / testTokenCount); } @Test @@ -273,7 +273,7 @@ public void testSymmetricTokenPerfHelper(String hmacAlgorithm, int keyLen) { } long duration = Time.monotonicNowNanos() - startTime; LOG.info("Average token sign time with {}({} symmetric key) is {} ns", - hmacAlgorithm, keyLen, duration/testTokenCount); + hmacAlgorithm, keyLen, duration / testTokenCount); } /* diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index 46774fe443a..868899d8300 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -21,25 +21,23 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.BucketManagerImpl; -import org.apache.hadoop.ozone.om.IOzoneAcl; -import org.apache.hadoop.ozone.om.KeyManagerImpl; +import org.apache.hadoop.ozone.om.BucketManager; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmTestManagers; import org.apache.hadoop.ozone.om.PrefixManager; -import org.apache.hadoop.ozone.om.PrefixManagerImpl; -import org.apache.hadoop.ozone.om.VolumeManagerImpl; +import org.apache.hadoop.ozone.om.VolumeManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.security.UserGroupInformation; @@ -78,8 +76,6 @@ import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME; import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; import static org.junit.Assert.*; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; /** * Test class for {@link OzoneNativeAuthorizer}. 
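Note on the hunks above and below: this PR renames the static test helper TestOMRequestUtils to OMRequestTestUtils, and in the ACL/authorizer tests (TestOzoneNativeAuthorizer here, TestParentAcl and TestVolumeOwner further down) it replaces the hand-wired KeyManagerImpl / VolumeManagerImpl / BucketManagerImpl / PrefixManagerImpl instances with the OmTestManagers helper, issuing writes through the OzoneManagerProtocol writeClient instead of calling the managers directly. A minimal sketch of the resulting setup pattern, assuming only the OmTestManagers accessors that appear in the hunks below (the wrapper class name here is hypothetical):

    // Sketch only: mirrors the setup() changes in this diff; the class name is
    // hypothetical, the OmTestManagers getters are taken from the PR hunks.
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.om.KeyManager;
    import org.apache.hadoop.ozone.om.OMMetadataManager;
    import org.apache.hadoop.ozone.om.OmTestManagers;
    import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;

    public final class OmTestSetupSketch {
      private OmTestSetupSketch() { }

      public static OzoneManagerProtocol setup(OzoneConfiguration conf)
          throws Exception {
        // One helper builds the whole OM manager graph for tests instead of
        // constructing each *Impl manager by hand.
        OmTestManagers omTestManagers = new OmTestManagers(conf);
        OMMetadataManager metadataManager = omTestManagers.getMetadataManager();
        KeyManager keyManager = omTestManagers.getKeyManager();
        // Write operations (createFile, commitKey, createDirectory, setAcl)
        // now go through the client-facing protocol.
        return omTestManagers.getWriteClient();
      }
    }

The same pattern repeats in each of the three test classes touched below.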
@@ -96,9 +92,10 @@ public class TestOzoneNativeAuthorizer { private ACLType parentDirGroupAcl; private boolean expectedAclResult; - private static KeyManagerImpl keyManager; - private static VolumeManagerImpl volumeManager; - private static BucketManagerImpl bucketManager; + private static OzoneManagerProtocol writeClient; + private static KeyManager keyManager; + private static VolumeManager volumeManager; + private static BucketManager bucketManager; private static PrefixManager prefixManager; private static OMMetadataManager metadataManager; private static OzoneNativeAuthorizer nativeAuthorizer; @@ -150,14 +147,14 @@ public static void setup() throws Exception { ozConfig.set(OZONE_METADATA_DIRS, dir.toString()); ozConfig.set(OZONE_ADMINISTRATORS, "om"); - metadataManager = new OmMetadataManagerImpl(ozConfig); - volumeManager = new VolumeManagerImpl(metadataManager, ozConfig); - bucketManager = new BucketManagerImpl(metadataManager); - prefixManager = new PrefixManagerImpl(metadataManager, false); - - keyManager = new KeyManagerImpl(mock(ScmBlockLocationProtocol.class), - metadataManager, ozConfig, "om1", null); - + OmTestManagers omTestManagers = + new OmTestManagers(ozConfig); + metadataManager = omTestManagers.getMetadataManager(); + volumeManager = omTestManagers.getVolumeManager(); + bucketManager = omTestManagers.getBucketManager(); + prefixManager = omTestManagers.getPrefixManager(); + keyManager = omTestManagers.getKeyManager(); + writeClient = omTestManagers.getWriteClient(); nativeAuthorizer = new OzoneNativeAuthorizer(volumeManager, bucketManager, keyManager, prefixManager, Collections.singletonList("om")); @@ -181,14 +178,14 @@ private void createKey(String volume, .build(); if (keyName.split(OZONE_URI_DELIMITER).length > 1) { - keyManager.createDirectory(keyArgs); + writeClient.createDirectory(keyArgs); key = key + OZONE_URI_DELIMITER; } else { - OpenKeySession keySession = keyManager.createFile(keyArgs, true, false); + OpenKeySession keySession = writeClient.createFile(keyArgs, true, false); keyArgs.setLocationInfoList( keySession.getKeyInfo().getLatestVersionLocations() .getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); + writeClient.commitKey(keyArgs, keySession.getId()); } keyObj = new OzoneObjInfo.Builder() @@ -206,7 +203,7 @@ private void createBucket(String volumeName, String bucketName) .setVolumeName(volumeName) .setBucketName(bucketName) .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); buckObj = new OzoneObjInfo.Builder() .setVolumeName(vol) .setBucketName(buck) @@ -221,7 +218,7 @@ private void createVolume(String volumeName) throws IOException { .setAdminName(adminUgi.getUserName()) .setOwnerName(testUgi.getUserName()) .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); + OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); volObj = new OzoneObjInfo.Builder() .setVolumeName(vol) .setResType(VOLUME) @@ -232,10 +229,10 @@ private void createVolume(String volumeName) throws IOException { @Test public void testCheckAccessForVolume() throws Exception { expectedAclResult = true; - resetAclsAndValidateAccess(volObj, USER, volumeManager); - resetAclsAndValidateAccess(volObj, GROUP, volumeManager); - resetAclsAndValidateAccess(volObj, WORLD, volumeManager); - resetAclsAndValidateAccess(volObj, ANONYMOUS, volumeManager); + resetAclsAndValidateAccess(volObj, USER, writeClient); + resetAclsAndValidateAccess(volObj, GROUP, 
writeClient); + resetAclsAndValidateAccess(volObj, WORLD, writeClient); + resetAclsAndValidateAccess(volObj, ANONYMOUS, writeClient); } @Test @@ -251,10 +248,10 @@ public void testCheckAccessForBucket() throws Exception { setVolumeAcl(Arrays.asList(userAcl, groupAcl)); - resetAclsAndValidateAccess(buckObj, USER, bucketManager); - resetAclsAndValidateAccess(buckObj, GROUP, bucketManager); - resetAclsAndValidateAccess(buckObj, WORLD, bucketManager); - resetAclsAndValidateAccess(buckObj, ANONYMOUS, bucketManager); + resetAclsAndValidateAccess(buckObj, USER, writeClient); + resetAclsAndValidateAccess(buckObj, GROUP, writeClient); + resetAclsAndValidateAccess(buckObj, WORLD, writeClient); + resetAclsAndValidateAccess(buckObj, ANONYMOUS, writeClient); } @Test @@ -269,10 +266,10 @@ public void testCheckAccessForKey() throws Exception { setVolumeAcl(Arrays.asList(userAcl, groupAcl)); setBucketAcl(Arrays.asList(userAcl, groupAcl)); - resetAclsAndValidateAccess(keyObj, USER, keyManager); - resetAclsAndValidateAccess(keyObj, GROUP, keyManager); - resetAclsAndValidateAccess(keyObj, WORLD, keyManager); - resetAclsAndValidateAccess(keyObj, ANONYMOUS, keyManager); + resetAclsAndValidateAccess(keyObj, USER, writeClient); + resetAclsAndValidateAccess(keyObj, GROUP, writeClient); + resetAclsAndValidateAccess(keyObj, WORLD, writeClient); + resetAclsAndValidateAccess(keyObj, ANONYMOUS, writeClient); } @Test @@ -297,10 +294,10 @@ public void testCheckAccessForPrefix() throws Exception { setBucketAcl(Arrays.asList(userAcl, groupAcl)); - resetAclsAndValidateAccess(prefixObj, USER, prefixManager); - resetAclsAndValidateAccess(prefixObj, GROUP, prefixManager); - resetAclsAndValidateAccess(prefixObj, WORLD, prefixManager); - resetAclsAndValidateAccess(prefixObj, ANONYMOUS, prefixManager); + resetAclsAndValidateAccess(prefixObj, USER, writeClient); + resetAclsAndValidateAccess(prefixObj, GROUP, writeClient); + resetAclsAndValidateAccess(prefixObj, WORLD, writeClient); + resetAclsAndValidateAccess(prefixObj, ANONYMOUS, writeClient); } @@ -347,12 +344,12 @@ private void addBucketAcl(OzoneAcl ozoneAcl) throws IOException { } private void resetAclsAndValidateAccess(OzoneObj obj, - ACLIdentityType accessType, IOzoneAcl aclImplementor) + ACLIdentityType accessType, OzoneManagerProtocol aclImplementor) throws IOException { List acls; String user = testUgi.getUserName(); String group = (testUgi.getGroups().size() > 0) ? - testUgi.getGroups().get(0): ""; + testUgi.getGroups().get(0) : ""; RequestContext.Builder builder = new RequestContext.Builder() .setClientUgi(testUgi) @@ -375,7 +372,7 @@ private void resetAclsAndValidateAccess(OzoneObj obj, // Reset acls to only one right. if (obj.getResourceType() == VOLUME) { setVolumeAcl(Collections.singletonList(newAcl)); - } else if (obj.getResourceType() == BUCKET){ + } else if (obj.getResourceType() == BUCKET) { setBucketAcl(Collections.singletonList(newAcl)); } else { aclImplementor.setAcl(obj, Collections.singletonList(newAcl)); @@ -453,7 +450,7 @@ private void resetAclsAndValidateAccess(OzoneObj obj, // only DB not cache. 
if (obj.getResourceType() == VOLUME) { addVolumeAcl(addAcl); - } else if (obj.getResourceType() == BUCKET){ + } else if (obj.getResourceType() == BUCKET) { addBucketAcl(addAcl); } else { aclImplementor.addAcl(obj, addAcl); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java index 48ca158b069..1f8f246a3c2 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java @@ -23,17 +23,15 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.ozone.om.BucketManagerImpl; -import org.apache.hadoop.ozone.om.KeyManagerImpl; +import org.apache.hadoop.ozone.om.BucketManager; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmTestManagers; import org.apache.hadoop.ozone.om.PrefixManager; -import org.apache.hadoop.ozone.om.PrefixManagerImpl; -import org.apache.hadoop.ozone.om.VolumeManagerImpl; +import org.apache.hadoop.ozone.om.VolumeManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -41,9 +39,11 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; import org.junit.BeforeClass; @@ -73,24 +73,24 @@ import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.KEY; import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME; import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE; -import static org.mockito.Mockito.mock; /** * Test parent acl requirements when accessing children with native authorizer. 
*/ public class TestParentAcl { private static OzoneConfiguration ozConfig; - private static KeyManagerImpl keyManager; - private static VolumeManagerImpl volumeManager; - private static BucketManagerImpl bucketManager; + private static KeyManager keyManager; + private static VolumeManager volumeManager; + private static BucketManager bucketManager; private static PrefixManager prefixManager; private static OMMetadataManager metadataManager; private static OzoneNativeAuthorizer nativeAuthorizer; private static UserGroupInformation adminUgi; private static UserGroupInformation testUgi, testUgi1; + private static OzoneManagerProtocol writeClient; @BeforeClass - public static void setup() throws IOException { + public static void setup() throws IOException, AuthenticationException { ozConfig = new OzoneConfiguration(); ozConfig.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE); @@ -98,14 +98,14 @@ public static void setup() throws IOException { ozConfig.set(OZONE_METADATA_DIRS, dir.toString()); ozConfig.set(OZONE_ADMINISTRATORS, "om"); - metadataManager = new OmMetadataManagerImpl(ozConfig); - volumeManager = new VolumeManagerImpl(metadataManager, ozConfig); - bucketManager = new BucketManagerImpl(metadataManager); - prefixManager = new PrefixManagerImpl(metadataManager, false); - - keyManager = new KeyManagerImpl(mock(ScmBlockLocationProtocol.class), - metadataManager, ozConfig, "om1", null); - + OmTestManagers omTestManagers = + new OmTestManagers(ozConfig); + metadataManager = omTestManagers.getMetadataManager(); + volumeManager = omTestManagers.getVolumeManager(); + bucketManager = omTestManagers.getBucketManager(); + prefixManager = omTestManagers.getPrefixManager(); + keyManager = omTestManagers.getKeyManager(); + writeClient = omTestManagers.getWriteClient(); nativeAuthorizer = new OzoneNativeAuthorizer(volumeManager, bucketManager, keyManager, prefixManager, Collections.singletonList("om")); @@ -357,7 +357,7 @@ private static OzoneObjInfo createVolume(String volumeName) .setAdminName(adminUgi.getUserName()) .setOwnerName(testUgi.getUserName()) .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); + OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); return new OzoneObjInfo.Builder() .setVolumeName(volumeName) .setResType(VOLUME) @@ -371,7 +371,7 @@ private static OzoneObjInfo createBucket(String volumeName, String bucketName) .setVolumeName(volumeName) .setBucketName(bucketName) .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); return new OzoneObjInfo.Builder() .setVolumeName(volumeName) .setBucketName(bucketName) @@ -396,13 +396,13 @@ private OzoneObjInfo createKey(String volume, String bucket, String keyName) if (keyName.split(OZONE_URI_DELIMITER).length > 1) { - keyManager.createDirectory(keyArgs); + writeClient.createDirectory(keyArgs); } else { - OpenKeySession keySession = keyManager.createFile(keyArgs, true, false); + OpenKeySession keySession = writeClient.createFile(keyArgs, true, false); keyArgs.setLocationInfoList( keySession.getKeyInfo().getLatestVersionLocations() .getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); + writeClient.commitKey(keyArgs, keySession.getId()); } return new OzoneObjInfo.Builder() diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java 
index 06516e98286..3f73debd658 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java @@ -21,21 +21,21 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; -import org.apache.hadoop.ozone.om.BucketManagerImpl; -import org.apache.hadoop.ozone.om.KeyManagerImpl; +import org.apache.hadoop.ozone.om.BucketManager; +import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMMetadataManager; -import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmTestManagers; import org.apache.hadoop.ozone.om.PrefixManager; -import org.apache.hadoop.ozone.om.PrefixManagerImpl; -import org.apache.hadoop.ozone.om.VolumeManagerImpl; +import org.apache.hadoop.ozone.om.VolumeManager; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil; -import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; import org.junit.BeforeClass; @@ -54,7 +54,6 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.ALL; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.CREATE; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.NONE; -import static org.mockito.Mockito.mock; /** @@ -64,28 +63,30 @@ public class TestVolumeOwner { private static OzoneConfiguration ozoneConfig; private static OzoneNativeAuthorizer nativeAuthorizer; - private static KeyManagerImpl keyManager; - private static VolumeManagerImpl volumeManager; - private static BucketManagerImpl bucketManager; + private static KeyManager keyManager; + private static VolumeManager volumeManager; + private static BucketManager bucketManager; private static PrefixManager prefixManager; private static OMMetadataManager metadataManager; private static UserGroupInformation testUgi; + private static OzoneManagerProtocol writeClient; @BeforeClass - public static void setup() throws IOException { + public static void setup() throws IOException, AuthenticationException { ozoneConfig = new OzoneConfiguration(); ozoneConfig.set(OZONE_ACL_AUTHORIZER_CLASS, OZONE_ACL_AUTHORIZER_CLASS_NATIVE); File dir = GenericTestUtils.getRandomizedTestDir(); ozoneConfig.set(OZONE_METADATA_DIRS, dir.toString()); - metadataManager = new OmMetadataManagerImpl(ozoneConfig); - volumeManager = new VolumeManagerImpl(metadataManager, ozoneConfig); - bucketManager = new BucketManagerImpl(metadataManager); - keyManager = new KeyManagerImpl(mock(ScmBlockLocationProtocol.class), - metadataManager, ozoneConfig, "om1", null); - prefixManager = new PrefixManagerImpl(metadataManager, false); - + OmTestManagers omTestManagers = + new OmTestManagers(ozoneConfig); + metadataManager 
= omTestManagers.getMetadataManager(); + volumeManager = omTestManagers.getVolumeManager(); + bucketManager = omTestManagers.getBucketManager(); + keyManager = omTestManagers.getKeyManager(); + prefixManager = omTestManagers.getPrefixManager(); + writeClient = omTestManagers.getWriteClient(); nativeAuthorizer = new OzoneNativeAuthorizer(volumeManager, bucketManager, keyManager, prefixManager, Collections.singletonList("om")); @@ -106,7 +107,7 @@ private static void prepareTestVols() throws IOException { .setAdminName("om") .setOwnerName(getTestVolOwnerName(i)) .build(); - TestOMRequestUtils.addVolumeToOM(metadataManager, volumeArgs); + OMRequestTestUtils.addVolumeToOM(metadataManager, volumeArgs); } } @@ -118,7 +119,7 @@ private static void prepareTestBuckets() throws IOException { .setVolumeName(getTestVolumeName(i)) .setBucketName(getTestBucketName(j)) .build(); - TestOMRequestUtils.addBucketToOM(metadataManager, bucketInfo); + OMRequestTestUtils.addBucketToOM(metadataManager, bucketInfo); } } } @@ -144,12 +145,12 @@ private static void prepareTestKeys() throws IOException { testUgi.getUserName(), testUgi.getGroupNames(), NONE, NONE)); } OmKeyArgs keyArgs = keyArgsBuilder.build(); - OpenKeySession keySession = keyManager.createFile(keyArgs, true, + OpenKeySession keySession = writeClient.createFile(keyArgs, true, false); keyArgs.setLocationInfoList( keySession.getKeyInfo().getLatestVersionLocations() .getLocationList()); - keyManager.commitKey(keyArgs, keySession.getId()); + writeClient.commitKey(keyArgs, keySession.getId()); } } } @@ -186,7 +187,7 @@ public void testVolumeOps() throws Exception { List aclsToTest = Arrays.stream(IAccessAuthorizer.ACLType.values()).filter( - (type)-> type != NONE && type != CREATE) + (type) -> type != NONE && type != CREATE) .collect(Collectors.toList()); for (IAccessAuthorizer.ACLType type: aclsToTest) { nonAdminOwnerContext = getUserRequestContext(getTestVolOwnerName(0), @@ -295,6 +296,6 @@ private OzoneObj getTestKeyobj(int volIndex, int bucketIndex, List getAclsToTest() { return Arrays.stream(IAccessAuthorizer.ACLType.values()).filter( - (type)-> type != NONE).collect(Collectors.toList()); + (type) -> type != NONE).collect(Collectors.toList()); } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index 2c3a465ebd6..4c09a81d274 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -342,7 +342,7 @@ public FileStatusAdapter getFileStatus(String key, URI uri, @Override - public Iterator listKeys(String pathKey) throws IOException{ + public Iterator listKeys(String pathKey) throws IOException { incrementCounter(Statistic.OBJECTS_LIST, 1); return new IteratorAdapter(bucket.listKeys(pathKey)); } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index c920747b70e..c8549f0023b 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -285,7 +285,7 @@ private class RenameIterator extends OzoneListingIterator { @Override 
boolean processKey(List keyList) throws IOException { // TODO RenameKey needs to be changed to batch operation - for(String key : keyList) { + for (String key : keyList) { String newKeyName = dstKey.concat(key.substring(srcKey.length())); adapter.renameKey(key, newKeyName); } @@ -512,7 +512,7 @@ public boolean delete(Path f, boolean recursive) throws IOException { if (adapter.isFSOptimizedBucket()) { if (f.isRoot()) { - if (!recursive && listStatus(f).length!=0){ + if (!recursive && listStatus(f).length != 0) { throw new PathIsNotEmptyDirectoryException(f.toString()); } LOG.warn("Cannot delete root directory."); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java index 5c319173d6c..14983dc7491 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientUtils.java @@ -31,7 +31,7 @@ * Shared Utilities for Ozone FS and related classes. */ public final class OzoneClientUtils { - private OzoneClientUtils(){ + private OzoneClientUtils() { // Not used. } public static BucketLayout resolveLinkBucketLayout(OzoneBucket bucket, diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java index d7888a5013d..918640799c7 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java @@ -101,7 +101,7 @@ public int available() throws IOException { */ @Override public int read(ByteBuffer buf) throws IOException { - if (buf.isReadOnly()){ + if (buf.isReadOnly()) { throw new ReadOnlyBufferException(); } diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java index 55069ce54a7..a3675dcbe77 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/codegen/SqlDbUtils.java @@ -61,7 +61,7 @@ public static void createNewDerbyDatabase(String jdbcUrl, String schemaName) System.setProperty("derby.stream.error.method", DERBY_DISABLE_LOG_METHOD); Class.forName(DERBY_DRIVER_CLASS); - try(Connection connection = DriverManager.getConnection(jdbcUrl + try (Connection connection = DriverManager.getConnection(jdbcUrl + ";user=" + schemaName + ";create=true")) { LOG.info("Created derby database at {}.", jdbcUrl); @@ -72,7 +72,7 @@ public static void createNewDerbyDatabase(String jdbcUrl, String schemaName) * Used to suppress embedded derby database logging. * @return No-Op output stream. 
*/ - public static OutputStream disableDerbyLogFile(){ + public static OutputStream disableDerbyLogFile() { return new OutputStream() { @Override public void write(int b) throws IOException { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java index 2eeb3235af3..5a013dc9c93 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconConstants.java @@ -41,6 +41,8 @@ private ReconConstants() { public static final String CONTAINER_KEY_COUNT_TABLE = "containerKeyCountTable"; + public static final String RECON_SCM_SNAPSHOT_DB = "scm.snapshot.db"; + // By default, limit the number of results returned public static final String DEFAULT_FETCH_COUNT = "1000"; public static final String DEFAULT_BATCH_NUMBER = "1"; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java index 0cbb9c0e673..d23ffe913f0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java @@ -96,6 +96,13 @@ public final class ReconServerConfigKeys { public static final String RECON_OM_SNAPSHOT_TASK_FLUSH_PARAM = "recon.om.snapshot.task.flush.param"; + public static final String RECON_OM_DELTA_UPDATE_LIMIT = + "recon.om.delta.update.limit"; + public static final long RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT = 2000; + public static final String RECON_OM_DELTA_UPDATE_LOOP_LIMIT = + "recon.om.delta.update.loop.limit"; + public static final int RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT = 10; + public static final String OZONE_RECON_TASK_THREAD_COUNT_KEY = "ozone.recon.task.thread.count"; public static final int OZONE_RECON_TASK_THREAD_COUNT_DEFAULT = 5; @@ -120,6 +127,22 @@ public final class ReconServerConfigKeys { public static final String OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT_DEFAULT = "10s"; + public static final String OZONE_RECON_SCM_CONTAINER_THRESHOLD = + "ozone.recon.scm.container.threshold"; + public static final int OZONE_RECON_SCM_CONTAINER_THRESHOLD_DEFAULT = 100; + + public static final String OZONE_RECON_SCM_SNAPSHOT_ENABLED = + "ozone.recon.scm.snapshot.enabled"; + public static final boolean OZONE_RECON_SCM_SNAPSHOT_ENABLED_DEFAULT = false; + + public static final String OZONE_RECON_SCM_CONNECTION_TIMEOUT = + "ozone.recon.scm.connection.timeout"; + public static final String OZONE_RECON_SCM_CONNECTION_TIMEOUT_DEFAULT = "5s"; + + public static final String OZONE_RECON_SCM_CONNECTION_REQUEST_TIMEOUT = + "ozone.recon.scm.connection.request.timeout"; + public static final String + OZONE_RECON_SCM_CONNECTION_REQUEST_TIMEOUT_DEFAULT = "5s"; /** * Private constructor for utility class. 
*/ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java index 327e9b19266..eec33466b6f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java @@ -93,7 +93,7 @@ public void getMetricsResponse( ) { final ByteBuffer buffer = ByteBuffer.allocateDirect(16 * 1024); - while(inputChannel.read(buffer) != -1) { + while (inputChannel.read(buffer) != -1) { buffer.flip(); outputChannel.write(buffer); buffer.compact(); @@ -101,7 +101,7 @@ public void getMetricsResponse( buffer.flip(); - while(buffer.hasRemaining()) { + while (buffer.hasRemaining()) { outputChannel.write(buffer); } } finally { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java index cbcd9cac864..ac34c58f97b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NSSummaryEndpoint.java @@ -449,7 +449,7 @@ public Response getQuotaUsage(@QueryParam("path") String path) for (OmVolumeArgs volume: volumes) { final long quota = volume.getQuotaInBytes(); - assert(quota >= -1L); + assert (quota >= -1L); if (quota == -1L) { // If one volume has unlimited quota, the "root" quota is unlimited. quotaInBytes = -1L; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java index c7e5cc71a11..d475be4921f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/PipelineEndpoint.java @@ -116,7 +116,7 @@ public Response getPipelines() { // ex. 
group id of 48981bf7-8bea-4fbd-9857-79df51ee872d // is group-79DF51EE872D String[] splits = pipelineId.toString().split("-"); - String groupId = "group-" + splits[splits.length-1].toUpperCase(); + String groupId = "group-" + splits[splits.length - 1].toUpperCase(); Optional leaderElectionCount = getMetricValue( "ratis_leader_election_electionCount", groupId); leaderElectionCount.ifPresent(pipelineBuilder::setLeaderElections); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java index b605c0fc936..fdf493f168d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/codec/NSSummaryCodec.java @@ -82,7 +82,7 @@ public NSSummary fromPersistedFormat(byte[] rawData) throws IOException { res.setNumOfFiles(in.readInt()); res.setSizeOfFiles(in.readLong()); short len = in.readShort(); - assert(len == (short) ReconConstants.NUM_OF_BINS); + assert (len == (short) ReconConstants.NUM_OF_BINS); int[] fileSizeBucket = new int[len]; for (int i = 0; i < len; ++i) { fileSizeBucket[i] = in.readInt(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index b238278f509..d29bf5074a5 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -81,6 +81,7 @@ public ContainerHealthTask( public synchronized void run() { try { while (canRun()) { + wait(interval); long start = Time.monotonicNow(); long currentTime = System.currentTimeMillis(); long existingCount = processExistingDBRecords(currentTime); @@ -97,7 +98,6 @@ public synchronized void run() { " processing {} containers.", Time.monotonicNow() - start, containers.size()); processedContainers.clear(); - wait(interval); } } catch (Throwable t) { LOG.error("Exception in Missing Container task Thread.", t); @@ -144,7 +144,7 @@ private long processExistingDBRecords(long currentTime) { containerHealthSchemaManager.getAllUnhealthyRecordsCursor()) { ContainerHealthStatus currentContainer = null; Set existingRecords = new HashSet<>(); - while(cursor.hasNext()) { + while (cursor.hasNext()) { recordCount++; UnhealthyContainersRecord rec = cursor.fetchNext(); try { @@ -259,7 +259,7 @@ public static class ContainerHealthRecords { public static boolean retainOrUpdateRecord( ContainerHealthStatus container, UnhealthyContainersRecord rec) { boolean returnValue = false; - switch(UnHealthyContainerStates.valueOf(rec.getContainerState())) { + switch (UnHealthyContainerStates.valueOf(rec.getContainerState())) { case MISSING: returnValue = container.isMissing(); break; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java index 8e1516278ed..b8d7be04808 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java @@ -150,7 +150,7 @@ public void checkAndAddNewContainerBatch( existContainers = containers.get(true); } List noExistContainers = null; - if 
(containers.containsKey(false)){ + if (containers.containsKey(false)) { noExistContainers = containers.get(false).parallelStream(). map(ContainerReplicaProto::getContainerID) .collect(Collectors.toList()); @@ -178,7 +178,7 @@ public void checkAndAddNewContainerBatch( ContainerReplicaProto.State crpState = crp.getState(); try { checkContainerStateAndUpdate(cID, crpState); - } catch (Exception ioe){ + } catch (Exception ioe) { LOG.error("Exception while " + "checkContainerStateAndUpdate container", ioe); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java index 0f6d79570f0..41cdc7aa8e0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java @@ -26,6 +26,7 @@ import java.util.Set; import java.util.UUID; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto; @@ -294,4 +295,14 @@ private boolean needUpdate(DatanodeDetails datanodeDetails, return currentTime - getLastHeartbeat(datanodeDetails) >= reconDatanodeOutdatedTime; } + + public void reinitialize(Table nodeTable) { + this.nodeDB = nodeTable; + loadExistingNodes(); + } + + @VisibleForTesting + public long getNodeDBKeyCount() throws IOException { + return nodeDB.getEstimatedKeyCount(); + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java index f390ed72f9c..02f27518ce7 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineFactory.java @@ -43,7 +43,7 @@ static class ReconPipelineProvider extends PipelineProvider { @Override - public Pipeline create(ReplicationConfig config){ + public Pipeline create(ReplicationConfig config) { // We don't expect this to be called at all. But adding this as a red // flag for troubleshooting. 
throw new UnsupportedOperationException( diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index d0b0d81efdd..53a45dcd8a4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -18,14 +18,18 @@ package org.apache.hadoop.ozone.recon.scm; +import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; +import java.util.UUID; +import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.block.BlockManager; import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler; @@ -59,9 +63,15 @@ import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.utils.db.DBCheckpoint; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.Table.KeyValue; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.ozone.recon.ReconServerConfigKeys; import org.apache.hadoop.ozone.recon.fsck.ContainerHealthTask; import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager; import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; @@ -70,6 +80,8 @@ import com.google.inject.Inject; import static org.apache.hadoop.hdds.recon.ReconConfigKeys.RECON_SCM_CONFIG_PREFIX; import static org.apache.hadoop.hdds.scm.server.StorageContainerManager.buildRpcServerStartMessage; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; + import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -90,11 +102,11 @@ public class ReconStorageContainerManagerFacade private final EventQueue eventQueue; private final SCMContext scmContext; private final SCMStorageConfig scmStorageConfig; - private final DBStore dbStore; private final SCMNodeDetails reconNodeDetails; private final SCMHAManager scmhaManager; private final SequenceIdGenerator sequenceIdGen; + private DBStore dbStore; private ReconNodeManager nodeManager; private ReconPipelineManager pipelineManager; private ReconContainerManager containerManager; @@ -249,7 +261,15 @@ public void start() { "Recon ScmDatanodeProtocol RPC server", getDatanodeProtocolServer().getDatanodeRpcAddress())); } - initializePipelinesFromScm(); + boolean isSCMSnapshotEnabled = ozoneConfiguration.getBoolean( + ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED, + ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED_DEFAULT); + if (isSCMSnapshotEnabled) { + initializeSCMDB(); + LOG.info("SCM DB initialized"); + } else { + initializePipelinesFromScm(); + } getDatanodeProtocolServer().start(); this.reconScmTasks.forEach(ReconScmTask::start); } @@ 
-307,6 +327,111 @@ private void initializePipelinesFromScm() { } } + private void initializeSCMDB() { + try { + long scmContainersCount = scmServiceProvider.getContainerCount(); + long reconContainerCount = containerManager.getContainers().size(); + long threshold = ozoneConfiguration.getInt( + ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD, + ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD_DEFAULT); + + if (Math.abs(scmContainersCount - reconContainerCount) > threshold) { + LOG.info("Recon Container Count: {}, SCM Container Count: {}", + reconContainerCount, scmContainersCount); + updateReconSCMDBWithNewSnapshot(); + LOG.info("Updated Recon DB with SCM DB"); + } else { + initializePipelinesFromScm(); + } + } catch (IOException e) { + LOG.error("Exception encountered while getting SCM DB."); + } + } + + public void updateReconSCMDBWithNewSnapshot() throws IOException { + DBCheckpoint dbSnapshot = scmServiceProvider.getSCMDBSnapshot(); + if (dbSnapshot != null && dbSnapshot.getCheckpointLocation() != null) { + LOG.info("Got new checkpoint from SCM : " + + dbSnapshot.getCheckpointLocation()); + try { + initializeNewRdbStore(dbSnapshot.getCheckpointLocation().toFile()); + } catch (IOException e) { + LOG.error("Unable to refresh Recon SCM DB Snapshot. ", e); + } + } else { + LOG.error("Null snapshot location got from SCM."); + } + } + + private void deleteOldSCMDB() throws IOException { + if (dbStore != null) { + File oldDBLocation = dbStore.getDbLocation(); + if (oldDBLocation.exists()) { + LOG.info("Cleaning up old SCM snapshot db at {}.", + oldDBLocation.getAbsolutePath()); + FileUtils.deleteDirectory(oldDBLocation); + } + } + } + + private void initializeNewRdbStore(File dbFile) throws IOException { + try { + DBStore newStore = createDBAndAddSCMTablesAndCodecs( + dbFile, new ReconSCMDBDefinition()); + Table nodeTable = + ReconSCMDBDefinition.NODES.getTable(dbStore); + Table newNodeTable = + ReconSCMDBDefinition.NODES.getTable(newStore); + TableIterator> iterator = nodeTable.iterator(); + while (iterator.hasNext()) { + KeyValue keyValue = iterator.next(); + newNodeTable.put(keyValue.getKey(), keyValue.getValue()); + } + sequenceIdGen.reinitialize( + ReconSCMDBDefinition.SEQUENCE_ID.getTable(newStore)); + pipelineManager.reinitialize( + ReconSCMDBDefinition.PIPELINES.getTable(newStore)); + containerManager.reinitialize( + ReconSCMDBDefinition.CONTAINERS.getTable(newStore)); + nodeManager.reinitialize( + ReconSCMDBDefinition.NODES.getTable(newStore)); + deleteOldSCMDB(); + setDbStore(newStore); + File newDb = new File(dbFile.getParent() + + OZONE_URI_DELIMITER + ReconSCMDBDefinition.RECON_SCM_DB_NAME); + boolean success = dbFile.renameTo(newDb); + if (success) { + LOG.info("SCM snapshot linked to Recon DB."); + } + LOG.info("Created SCM DB handle from snapshot at {}.", + dbFile.getAbsolutePath()); + } catch (IOException ioEx) { + LOG.error("Unable to initialize Recon SCM DB snapshot store.", ioEx); + } + } + + private DBStore createDBAndAddSCMTablesAndCodecs(File dbFile, + ReconSCMDBDefinition definition) throws IOException { + DBStoreBuilder dbStoreBuilder = + DBStoreBuilder.newBuilder(ozoneConfiguration) + .setName(dbFile.getName()) + .setPath(dbFile.toPath().getParent()); + for (DBColumnFamilyDefinition columnFamily : + definition.getColumnFamilies()) { + dbStoreBuilder.addTable(columnFamily.getName()); + dbStoreBuilder.addCodec(columnFamily.getKeyType(), + columnFamily.getKeyCodec()); + dbStoreBuilder.addCodec(columnFamily.getValueType(), + 
columnFamily.getValueCodec()); + } + return dbStoreBuilder.build(); + } + + public void setDbStore(DBStore dbStore) { + this.dbStore = dbStore; + } + @Override public NodeManager getScmNodeManager() { return nodeManager; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java index be2c7cb6e7b..cf57937a15e 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.utils.db.DBCheckpoint; /** * Interface to access SCM endpoints. @@ -66,4 +67,15 @@ List getExistContainerWithPipelinesInBatch( */ List getNodes() throws IOException; + /** + * Requests SCM for container count. + * @return Total number of containers in SCM. + */ + long getContainerCount() throws IOException; + + /** + * Requests SCM for DB Snapshot. + * @return DBCheckpoint from SCM. + */ + DBCheckpoint getSCMDBSnapshot(); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java index 9625db65d10..1ebeeddb2b5 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/ContainerKeyPrefixCodec.java @@ -34,7 +34,7 @@ /** * Codec to encode ContainerKeyPrefix as byte array. 
*/ -public class ContainerKeyPrefixCodec implements Codec{ +public class ContainerKeyPrefixCodec implements Codec { private static final String KEY_DELIMITER = "_"; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java index 96263e0882b..08612e3f8ae 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java @@ -58,7 +58,7 @@ import com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.FileUtils; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH; -import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HTTP_AUTH_TYPE; import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_OM_SNAPSHOT_DB; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; @@ -71,6 +71,10 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INITIAL_DELAY_DEFAULT; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT; import static org.apache.ratis.proto.RaftProtos.RaftPeerRole.LEADER; import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; @@ -105,6 +109,9 @@ public class OzoneManagerServiceProviderImpl private ReconUtils reconUtils; private OzoneManagerSyncMetrics metrics; + private long deltaUpdateLimit; + private int deltaUpdateLoopLimit; + /** * OM Snapshot related task names. 
*/ @@ -145,17 +152,23 @@ public OzoneManagerServiceProviderImpl( String ozoneManagerHttpsAddress = configuration.get(OMConfigKeys .OZONE_OM_HTTPS_ADDRESS_KEY); + long deltaUpdateLimits = configuration.getLong(RECON_OM_DELTA_UPDATE_LIMIT, + RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT); + int deltaUpdateLoopLimits = configuration.getInt( + RECON_OM_DELTA_UPDATE_LOOP_LIMIT, + RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT); + omSnapshotDBParentDir = reconUtils.getReconDbDir(configuration, OZONE_RECON_OM_SNAPSHOT_DB_DIR); HttpConfig.Policy policy = HttpConfig.getHttpPolicy(configuration); omDBSnapshotUrl = "http://" + ozoneManagerHttpAddress + - OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; + OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; if (policy.isHttpsEnabled()) { omDBSnapshotUrl = "https://" + ozoneManagerHttpsAddress + - OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; + OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; } boolean flushParam = configuration.getBoolean( @@ -176,6 +189,8 @@ public OzoneManagerServiceProviderImpl( this.ozoneManagerClient = ozoneManagerClient; this.configuration = configuration; this.metrics = OzoneManagerSyncMetrics.create(); + this.deltaUpdateLimit = deltaUpdateLimits; + this.deltaUpdateLoopLimit = deltaUpdateLoopLimits; } public void registerOMDBTasks() { @@ -183,7 +198,7 @@ public void registerOMDBTasks() { OmSnapshotTaskName.OmDeltaRequest.name(), System.currentTimeMillis(), getCurrentOMDBSequenceNumber()); if (!reconTaskStatusDao.existsById( - OmSnapshotTaskName.OmDeltaRequest.name())){ + OmSnapshotTaskName.OmDeltaRequest.name())) { reconTaskStatusDao.insert(reconTaskStatusRecord); LOG.info("Registered {} task ", OmSnapshotTaskName.OmDeltaRequest.name()); @@ -193,7 +208,7 @@ public void registerOMDBTasks() { OmSnapshotTaskName.OmSnapshotRequest.name(), System.currentTimeMillis(), getCurrentOMDBSequenceNumber()); if (!reconTaskStatusDao.existsById( - OmSnapshotTaskName.OmSnapshotRequest.name())){ + OmSnapshotTaskName.OmSnapshotRequest.name())) { reconTaskStatusDao.insert(reconTaskStatusRecord); LOG.info("Registered {} task ", OmSnapshotTaskName.OmSnapshotRequest.name()); @@ -271,7 +286,7 @@ public String getOzoneManagerSnapshotUrl() throws IOException { omLeaderUrl = (policy.isHttpsEnabled() ? "https://" + info.getServiceAddress(Type.HTTPS) : "http://" + info.getServiceAddress(Type.HTTP)) + - OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; + OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; } } } @@ -356,8 +371,38 @@ boolean updateReconOmDBWithNewSnapshot() throws IOException { void getAndApplyDeltaUpdatesFromOM( long fromSequenceNumber, OMDBUpdatesHandler omdbUpdatesHandler) throws IOException, RocksDBException { + int loopCount = 0; + long originalFromSequenceNumber = fromSequenceNumber; + long resultCount = Long.MAX_VALUE; + while (loopCount < deltaUpdateLoopLimit && + resultCount >= deltaUpdateLimit) { + resultCount = innerGetAndApplyDeltaUpdatesFromOM(fromSequenceNumber, + omdbUpdatesHandler); + fromSequenceNumber += resultCount; + loopCount++; + } + LOG.info("Delta updates received from OM : {} loops, {} records", loopCount, + fromSequenceNumber - originalFromSequenceNumber + ); + } + + /** + * Get Delta updates from OM through RPC call and apply to local OM DB as + * well as accumulate in a buffer. + * @param fromSequenceNumber from sequence number to request from. + * @param omdbUpdatesHandler OM DB updates handler to buffer updates. + * @throws IOException when OM RPC request fails. + * @throws RocksDBException when writing to RocksDB fails. 
+ */ + @VisibleForTesting + long innerGetAndApplyDeltaUpdatesFromOM(long fromSequenceNumber, + OMDBUpdatesHandler omdbUpdatesHandler) + throws IOException, RocksDBException { + int recordCount = 0; DBUpdatesRequest dbUpdatesRequest = DBUpdatesRequest.newBuilder() - .setSequenceNumber(fromSequenceNumber).build(); + .setSequenceNumber(fromSequenceNumber) + .setLimitCount(deltaUpdateLimit) + .build(); DBUpdates dbUpdates = ozoneManagerClient.getDBUpdates(dbUpdatesRequest); if (null != dbUpdates) { RDBStore rocksDBStore = (RDBStore) omMetadataManager.getStore(); @@ -366,6 +411,7 @@ void getAndApplyDeltaUpdatesFromOM( LOG.info("Number of updates received from OM : {}", numUpdates); if (numUpdates > 0) { metrics.incrNumUpdatesInDeltaTotal(numUpdates); + recordCount = numUpdates; } for (byte[] data : dbUpdates.getData()) { try (WriteBatch writeBatch = new WriteBatch(data)) { @@ -379,6 +425,7 @@ void getAndApplyDeltaUpdatesFromOM( } } } + return recordCount; } /** diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java index 1e609e80720..8f83c66d5c2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/StorageContainerServiceProviderImpl.java @@ -18,18 +18,48 @@ package org.apache.hadoop.ozone.recon.spi.impl; +import static org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig.ConfigStrings.HDDS_SCM_HTTP_AUTH_TYPE; import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION; +import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_SCM_SNAPSHOT_DB; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_CONNECTION_REQUEST_TIMEOUT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_CONNECTION_REQUEST_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_CONNECTION_TIMEOUT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_CONNECTION_TIMEOUT_DEFAULT; +import java.io.File; import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Path; +import java.nio.file.Paths; import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; import javax.inject.Inject; +import org.apache.commons.io.FileUtils; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.ha.InterSCMGrpcClient; +import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; +import org.apache.hadoop.hdds.scm.ha.SCMSnapshotDownloader; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; +import org.apache.hadoop.hdds.security.x509.certificate.client.SCMCertificateClient; +import org.apache.hadoop.hdds.server.http.HttpConfig; +import org.apache.hadoop.hdds.utils.db.DBCheckpoint; +import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint; +import 
org.apache.hadoop.hdfs.web.URLConnectionFactory; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.ratis.proto.RaftProtos; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Implementation for StorageContainerServiceProvider that talks with actual @@ -38,12 +68,53 @@ public class StorageContainerServiceProviderImpl implements StorageContainerServiceProvider { + private static final Logger LOG = + LoggerFactory.getLogger(StorageContainerServiceProviderImpl.class); private StorageContainerLocationProtocol scmClient; + private final OzoneConfiguration configuration; + private String scmDBSnapshotUrl; + private File scmSnapshotDBParentDir; + private URLConnectionFactory connectionFactory; + private ReconUtils reconUtils; @Inject public StorageContainerServiceProviderImpl( - StorageContainerLocationProtocol scmClient) { + StorageContainerLocationProtocol scmClient, + ReconUtils reconUtils, + OzoneConfiguration configuration) { + + int connectionTimeout = (int) configuration.getTimeDuration( + OZONE_RECON_SCM_CONNECTION_TIMEOUT, + OZONE_RECON_SCM_CONNECTION_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); + int connectionRequestTimeout = (int) configuration.getTimeDuration( + OZONE_RECON_SCM_CONNECTION_REQUEST_TIMEOUT, + OZONE_RECON_SCM_CONNECTION_REQUEST_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + connectionFactory = + URLConnectionFactory.newDefaultURLConnectionFactory(connectionTimeout, + connectionRequestTimeout, configuration); + + String scmHttpAddress = configuration.get(ScmConfigKeys + .OZONE_SCM_HTTP_ADDRESS_KEY); + + String scmHttpsAddress = configuration.get(ScmConfigKeys + .OZONE_SCM_HTTPS_ADDRESS_KEY); + + HttpConfig.Policy policy = HttpConfig.getHttpPolicy(configuration); + + scmSnapshotDBParentDir = ReconUtils.getReconScmDbDir(configuration); + + scmDBSnapshotUrl = "http://" + scmHttpAddress + + OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; + + if (policy.isHttpsEnabled()) { + scmDBSnapshotUrl = "https://" + scmHttpsAddress + + OZONE_DB_CHECKPOINT_HTTP_ENDPOINT; + } + + this.reconUtils = reconUtils; this.scmClient = scmClient; + this.configuration = configuration; } @Override @@ -74,4 +145,70 @@ public List getNodes() throws IOException { return scmClient.queryNode(null, null, HddsProtos.QueryScope.CLUSTER, "", CURRENT_VERSION); } + + @Override + public long getContainerCount() throws IOException { + return scmClient.getContainerCount(); + } + + public String getScmDBSnapshotUrl() { + return scmDBSnapshotUrl; + } + + private boolean isOmSpnegoEnabled() { + return configuration.get(HDDS_SCM_HTTP_AUTH_TYPE, "simple") + .equals("kerberos"); + } + + public DBCheckpoint getSCMDBSnapshot() { + String snapshotFileName = RECON_SCM_SNAPSHOT_DB + "_" + + System.currentTimeMillis(); + File targetFile = new File(scmSnapshotDBParentDir, snapshotFileName + + ".tar.gz"); + + try { + if (!SCMHAUtils.isSCMHAEnabled(configuration)) { + SecurityUtil.doAsLoginUser(() -> { + try (InputStream inputStream = reconUtils.makeHttpCall( + connectionFactory, getScmDBSnapshotUrl(), + isOmSpnegoEnabled()).getInputStream()) { + FileUtils.copyInputStreamToFile(inputStream, targetFile); + } + return null; + }); + LOG.info("Downloaded SCM Snapshot from SCM"); + } else { + List ratisRoles = scmClient.getScmInfo().getRatisPeerRoles(); + for (String ratisRole: ratisRoles) { + String[] role = ratisRole.split(":"); + if (role[2].equals(RaftProtos.RaftPeerRole.LEADER.toString())) { + 
String hostAddress = role[4].trim(); + int grpcPort = configuration.getInt( + ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY, + ScmConfigKeys.OZONE_SCM_GRPC_PORT_DEFAULT); + + try (SCMSnapshotDownloader downloadClient = + new InterSCMGrpcClient(hostAddress, grpcPort, + configuration, new SCMCertificateClient( + new SecurityConfig(configuration)))) { + downloadClient.download(targetFile.toPath()).get(); + } catch (ExecutionException | InterruptedException e) { + LOG.error("Rocks DB checkpoint downloading failed", e); + throw new IOException(e); + } + LOG.info("Downloaded SCM Snapshot from Leader SCM"); + break; + } + } + } + Path untarredDbDir = Paths.get(scmSnapshotDBParentDir.getAbsolutePath(), + snapshotFileName); + reconUtils.untarCheckpointFile(targetFile, untarredDbDir); + FileUtils.deleteQuietly(targetFile); + return new RocksDBCheckpoint(untarredDbDir); + } catch (IOException e) { + LOG.error("Unable to obtain SCM DB Snapshot. ", e); + } + return null; + } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java index 8c390d40899..e9d8cbb7785 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java @@ -128,7 +128,7 @@ public Pair process(OMUpdateEventBatch events) { String updatedKey = omdbUpdateEvent.getKey(); OmKeyInfo omKeyInfo = omdbUpdateEvent.getValue(); - try{ + try { switch (omdbUpdateEvent.getAction()) { case PUT: handlePutKeyEvent(omKeyInfo, fileSizeCountMap); @@ -258,7 +258,7 @@ private static class FileSizeCountKey { @Override public boolean equals(Object obj) { - if(obj instanceof FileSizeCountKey) { + if (obj instanceof FileSizeCountKey) { FileSizeCountKey s = (FileSizeCountKey) obj; return volume.equals(s.volume) && bucket.equals(s.bucket) && fileSizeUpperBound.equals(s.fileSizeUpperBound); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java index 79b28feeb01..6e827c76599 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java @@ -123,7 +123,7 @@ public Pair process(OMUpdateEventBatch events) { continue; } String rowKey = getRowKeyFromTable(omdbUpdateEvent.getTable()); - try{ + try { switch (omdbUpdateEvent.getAction()) { case PUT: objectCountMap.computeIfPresent(rowKey, (k, count) -> count + 1L); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java index 9485dbf8f7f..d5da4a35a1c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java @@ -68,7 +68,7 @@ public void testGetTaskTimes() { response.getEntity(); Assert.assertEquals(resultList.size(), responseList.size()); - for(ReconTaskStatus r : responseList) { + for (ReconTaskStatus r : responseList) { Assert.assertEquals(reconTaskStatusRecord.getTaskName(), r.getTaskName()); Assert.assertEquals(reconTaskStatusRecord.getLastUpdatedTimestamp(), r.getLastUpdatedTimestamp()); 
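Note on the ReconStorageContainerManagerFacade change above: with OZONE_RECON_SCM_SNAPSHOT_ENABLED turned on, Recon first compares its own container count with SCM's and only pulls a full SCM DB checkpoint when the two counts drift apart by more than OZONE_RECON_SCM_CONTAINER_THRESHOLD; otherwise it keeps the cheaper initializePipelinesFromScm() path. The standalone sketch below mirrors only that decision rule; the class name, method name, and sample threshold value are illustrative and not part of the patch.

// Hypothetical, self-contained illustration of the snapshot-vs-pipelines
// decision added in initializeSCMDB(); not the actual Recon API.
public final class SnapshotDecisionSketch {

  private SnapshotDecisionSketch() {
  }

  // True when the container counts reported by SCM and held by Recon
  // diverge by more than the configured threshold, i.e. when Recon should
  // download a fresh SCM DB snapshot instead of just re-reading pipelines.
  static boolean shouldFetchFullSnapshot(long scmContainers,
      long reconContainers, long threshold) {
    return Math.abs(scmContainers - reconContainers) > threshold;
  }

  public static void main(String[] args) {
    long threshold = 100; // assumed value; the real default comes from
                          // OZONE_RECON_SCM_CONTAINER_THRESHOLD_DEFAULT
    System.out.println(shouldFetchFullSnapshot(1150, 1000, threshold)); // true
    System.out.println(shouldFetchFullSnapshot(1010, 1000, threshold)); // false
  }
}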
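The OzoneManagerServiceProviderImpl change likewise replaces the single unbounded getDBUpdates call with a bounded loop: every request is capped at RECON_OM_DELTA_UPDATE_LIMIT records, and the loop ends after RECON_OM_DELTA_UPDATE_LOOP_LIMIT iterations or as soon as a batch comes back smaller than the limit. A minimal sketch of that termination logic follows, assuming a hypothetical fetchBatch callback in place of the real OM RPC.

import java.util.function.LongUnaryOperator;

// Sketch of the batched delta-update loop; fetchBatch stands in for the
// innerGetAndApplyDeltaUpdatesFromOM RPC call and is purely illustrative.
public final class DeltaUpdateLoopSketch {

  private DeltaUpdateLoopSketch() {
  }

  // Requests delta batches until the loop limit is reached or a batch is
  // shorter than the per-request limit, and returns the records applied.
  static long applyDeltas(long fromSequenceNumber, long deltaUpdateLimit,
      int deltaUpdateLoopLimit, LongUnaryOperator fetchBatch) {
    int loopCount = 0;
    long start = fromSequenceNumber;
    long resultCount = Long.MAX_VALUE;
    while (loopCount < deltaUpdateLoopLimit && resultCount >= deltaUpdateLimit) {
      resultCount = fetchBatch.applyAsLong(fromSequenceNumber);
      fromSequenceNumber += resultCount;
      loopCount++;
    }
    return fromSequenceNumber - start;
  }

  public static void main(String[] args) {
    // 5 pending records, limit of 2 per request, at most 10 loops:
    // batches of 2, 2 and 1 are fetched, then the short batch stops the loop.
    long[] pending = {5};
    long applied = applyDeltas(0L, 2L, 10, from -> {
      long batch = Math.min(2L, pending[0]);
      pending[0] -= batch;
      return batch;
    });
    System.out.println(applied); // 5
  }
}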
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index ac5aeafbc6c..db2448b3dee 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -370,7 +370,7 @@ public ContainerPlacementStatus validateContainerPlacement( } private boolean isDnPresent(List dns) { - for(DatanodeDetails dn : dns) { + for (DatanodeDetails dn : dns) { if (misRepWhenDnPresent != null && dn.getUuid().equals(misRepWhenDnPresent)) { return true; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java index ab21a353268..59689237c6d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java @@ -94,7 +94,7 @@ public void testReconSchemaCreated() throws Exception { new ImmutablePair<>("count", Types.BIGINT)); List> actualPairsFileCount = new ArrayList<>(); - while(resultSetFileCount.next()) { + while (resultSetFileCount.next()) { actualPairsFileCount.add(new ImmutablePair<>(resultSetFileCount.getString( "COLUMN_NAME"), resultSetFileCount.getInt( "DATA_TYPE"))); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java index c8d2544c71f..c73990297ab 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java @@ -25,12 +25,15 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_DB_DIR; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_DB_DIR; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT; import static org.apache.hadoop.ozone.recon.ReconUtils.createTarFile; import static org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl.OmSnapshotTaskName.OmDeltaRequest; import static org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl.OmSnapshotTaskName.OmSnapshotRequest; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; @@ -214,7 +217,7 @@ public void testGetAndApplyDeltaUpdatesFromOM() throws Exception { RocksDB rocksDB = ((RDBStore)sourceOMMetadataMgr.getStore()).getDb(); TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(0L); DBUpdates dbUpdatesWrapper = new 
DBUpdates(); - while(transactionLogIterator.isValid()) { + while (transactionLogIterator.isValid()) { TransactionLogIterator.BatchResult result = transactionLogIterator.getBatch(); result.writeBatch().markWalTerminationPoint(); @@ -245,7 +248,7 @@ public void testGetAndApplyDeltaUpdatesFromOM() throws Exception { metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0); assertEquals(1, metrics.getNumNonZeroDeltaRequests().value()); - // In this method, we have to assert the "GET" part and the "APPLY" path. + // In this method, we have to assert the "GET" path and the "APPLY" path. // Assert GET path --> verify if the OMDBUpdatesHandler picked up the 4 // events ( 1 Vol PUT + 1 Bucket PUT + 2 Key PUTs). @@ -263,6 +266,76 @@ public void testGetAndApplyDeltaUpdatesFromOM() throws Exception { .getKeyTable(getBucketLayout()).isExist(fullKey)); } + @Test + public void testGetAndApplyDeltaUpdatesFromOMWithLimit() throws Exception { + + // Writing 2 Keys into a source OM DB and collecting it in a + // DBUpdatesWrapper. + OMMetadataManager sourceOMMetadataMgr = + initializeNewOmMetadataManager(temporaryFolder.newFolder()); + writeDataToOm(sourceOMMetadataMgr, "key_one"); + writeDataToOm(sourceOMMetadataMgr, "key_two"); + + RocksDB rocksDB = ((RDBStore)sourceOMMetadataMgr.getStore()).getDb(); + TransactionLogIterator transactionLogIterator = rocksDB.getUpdatesSince(0L); + DBUpdates[] dbUpdatesWrapper = new DBUpdates[4]; + int index = 0; + while (transactionLogIterator.isValid()) { + TransactionLogIterator.BatchResult result = + transactionLogIterator.getBatch(); + result.writeBatch().markWalTerminationPoint(); + WriteBatch writeBatch = result.writeBatch(); + dbUpdatesWrapper[index] = new DBUpdates(); + dbUpdatesWrapper[index].addWriteBatch(writeBatch.data(), + result.sequenceNumber()); + index++; + transactionLogIterator.next(); + } + + // OM Service Provider's Metadata Manager. + OMMetadataManager omMetadataManager = + initializeNewOmMetadataManager(temporaryFolder.newFolder()); + + OzoneConfiguration withLimitConfiguration = + new OzoneConfiguration(configuration); + withLimitConfiguration.setLong(RECON_OM_DELTA_UPDATE_LIMIT, 1); + withLimitConfiguration.setLong(RECON_OM_DELTA_UPDATE_LOOP_LIMIT, 3); + OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = + new OzoneManagerServiceProviderImpl(withLimitConfiguration, + getTestReconOmMetadataManager(omMetadataManager, + temporaryFolder.newFolder()), + getMockTaskController(), new ReconUtils(), + getMockOzoneManagerClientWith4Updates(dbUpdatesWrapper[0], + dbUpdatesWrapper[1], dbUpdatesWrapper[2], dbUpdatesWrapper[3])); + + OMDBUpdatesHandler updatesHandler = + new OMDBUpdatesHandler(omMetadataManager); + ozoneManagerServiceProvider.getAndApplyDeltaUpdatesFromOM( + 0L, updatesHandler); + + OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); + assertEquals(1.0, + metrics.getAverageNumUpdatesInDeltaRequest().value(), 0.0); + assertEquals(3, metrics.getNumNonZeroDeltaRequests().value()); + + // In this method, we have to assert the "GET" path and the "APPLY" path. + + // Assert GET path --> verify if the OMDBUpdatesHandler picked up the first + // 3 of 4 events ( 1 Vol PUT + 1 Bucket PUT + 2 Key PUTs). + assertEquals(3, updatesHandler.getEvents().size()); + + // Assert APPLY path --> Verify if the OM service provider's RocksDB got + // the first 3 changes, last change not applied. 
+ String fullKey = omMetadataManager.getOzoneKey("sampleVol", + "bucketOne", "key_one"); + assertTrue(ozoneManagerServiceProvider.getOMMetadataManagerInstance() + .getKeyTable(getBucketLayout()).isExist(fullKey)); + fullKey = omMetadataManager.getOzoneKey("sampleVol", + "bucketOne", "key_two"); + assertFalse(ozoneManagerServiceProvider.getOMMetadataManagerInstance() + .getKeyTable(getBucketLayout()).isExist(fullKey)); + } + @Test public void testSyncDataFromOMFullSnapshot() throws Exception { @@ -364,6 +437,17 @@ private OzoneManagerProtocol getMockOzoneManagerClient( return ozoneManagerProtocolMock; } + private OzoneManagerProtocol getMockOzoneManagerClientWith4Updates( + DBUpdates updates1, DBUpdates updates2, DBUpdates updates3, + DBUpdates updates4) throws IOException { + OzoneManagerProtocol ozoneManagerProtocolMock = + mock(OzoneManagerProtocol.class); + when(ozoneManagerProtocolMock.getDBUpdates(any(OzoneManagerProtocolProtos + .DBUpdatesRequest.class))).thenReturn(updates1, updates2, updates3, + updates4); + return ozoneManagerProtocolMock; + } + private BucketLayout getBucketLayout() { return BucketLayout.DEFAULT; } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java index 71c9bc4f021..f9d3fc41fc2 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java @@ -24,13 +24,18 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.io.File; import java.io.IOException; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.ozone.test.GenericTestUtils; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -55,6 +60,10 @@ protected void configure() { try { StorageContainerLocationProtocol mockScmClient = mock( StorageContainerLocationProtocol.class); + ReconUtils reconUtils = new ReconUtils(); + File testDir = GenericTestUtils.getRandomizedTestDir(); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath()); pipelineID = PipelineID.randomId().getProtobuf(); when(mockScmClient.getPipeline(pipelineID)) .thenReturn(mock(Pipeline.class)); @@ -62,6 +71,9 @@ protected void configure() { .toInstance(mockScmClient); bind(StorageContainerServiceProvider.class) .to(StorageContainerServiceProviderImpl.class); + bind(OzoneConfiguration.class). 
+ toInstance(conf); + bind(ReconUtils.class).toInstance(reconUtils); } catch (Exception e) { Assert.fail(); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java index 06157d3a1b5..ff5a5bb84c1 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java @@ -85,7 +85,7 @@ public void setUp() throws Exception { } @Test - public void testReprocessOMDB() throws Exception{ + public void testReprocessOMDB() throws Exception { Map keyPrefixesForContainer = reconContainerMetadataManager.getKeyPrefixesForContainer(1); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java index 2ff20cfb612..3dd0d2f59a0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java @@ -107,7 +107,7 @@ public void testPut() throws Exception { rocksDB.getUpdatesSince(0); List writeBatches = new ArrayList<>(); - while(transactionLogIterator.isValid()) { + while (transactionLogIterator.isValid()) { TransactionLogIterator.BatchResult result = transactionLogIterator.getBatch(); result.writeBatch().markWalTerminationPoint(); @@ -190,7 +190,7 @@ public void testDelete() throws Exception { rocksDB.getUpdatesSince(3); List writeBatches = new ArrayList<>(); - while(transactionLogIterator.isValid()) { + while (transactionLogIterator.isValid()) { TransactionLogIterator.BatchResult result = transactionLogIterator.getBatch(); result.writeBatch().markWalTerminationPoint(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java index 8f7f76cd908..81c406e5af2 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java @@ -116,7 +116,7 @@ public void testProcess() { ArrayList events = new ArrayList<>(); // Create 5 put, 1 delete and 1 update event for each table for (String tableName: tableCountTask.getTaskTables()) { - for (int i=0; i<5; i++) { + for (int i = 0; i < 5; i++) { events.add(getOMUpdateEvent("item" + i, null, tableName, PUT)); } // for delete event, if value is set to null, the counter will not be diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java index 28292514e14..ab88b18e7eb 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientCache.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.s3; +import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; @@ -43,14 +44,14 @@ public final class OzoneClientCache { private 
static OzoneClientCache instance; private OzoneClient client; - private OzoneClientCache(String omServiceID, - OzoneConfiguration ozoneConfiguration) + private OzoneClientCache(OzoneConfiguration ozoneConfiguration) throws IOException { // S3 Gateway should always set the S3 Auth. ozoneConfiguration.setBoolean(S3Auth.S3_AUTH_CHECK, true); // Set the expected OM version if not set via config. ozoneConfiguration.setIfUnset(OZONE_OM_CLIENT_PROTOCOL_VERSION_KEY, OZONE_OM_CLIENT_PROTOCOL_VERSION); + String omServiceID = OmUtils.getOzoneManagerServiceId(ozoneConfiguration); try { if (omServiceID == null) { client = OzoneClientFactory.getRpcClient(ozoneConfiguration); @@ -65,12 +66,11 @@ private OzoneClientCache(String omServiceID, } } - public static OzoneClient getOzoneClientInstance(String omServiceID, - OzoneConfiguration + public static OzoneClient getOzoneClientInstance(OzoneConfiguration ozoneConfiguration) throws IOException { if (instance == null) { - instance = new OzoneClientCache(omServiceID, ozoneConfiguration); + instance = new OzoneClientCache(ozoneConfiguration); } return instance.client; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java index b50beb0cecd..70ca42ed599 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneClientProducer.java @@ -39,7 +39,7 @@ import org.slf4j.LoggerFactory; import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.INTERNAL_ERROR; -import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.MALFORMED_HEADER; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.ACCESS_DENIED; /** * This class creates the OzoneClient for the Rest endpoints. @@ -58,9 +58,6 @@ public class OzoneClientProducer { @Inject private OzoneConfiguration ozoneConfiguration; - @Inject - private String omServiceID; - @Context private ContainerRequestContext context; @@ -86,7 +83,12 @@ public S3Auth getSignature() { } String awsAccessId = signatureInfo.getAwsAccessId(); - validateAccessId(awsAccessId); + // ONLY validate aws access id when needed. + if (awsAccessId == null || awsAccessId.equals("")) { + LOG.debug("Malformed s3 header. awsAccessID: ", awsAccessId); + throw ACCESS_DENIED; + } + return new S3Auth(stringToSign, signatureInfo.getSignature(), awsAccessId); @@ -106,8 +108,7 @@ private OzoneClient getClient(OzoneConfiguration config) OzoneClient ozoneClient = null; try { ozoneClient = - OzoneClientCache.getOzoneClientInstance(omServiceID, - ozoneConfiguration); + OzoneClientCache.getOzoneClientInstance(ozoneConfiguration); } catch (Exception e) { // For any other critical errors during object creation throw Internal // error. @@ -119,14 +120,6 @@ private OzoneClient getClient(OzoneConfiguration config) return ozoneClient; } - // ONLY validate aws access id when needed. - private void validateAccessId(String awsAccessId) throws Exception { - if (awsAccessId == null || awsAccessId.equals("")) { - LOG.error("Malformed s3 header. 
awsAccessID: ", awsAccessId); - throw wrapOS3Exception(MALFORMED_HEADER); - } - } - public synchronized void setOzoneConfiguration(OzoneConfiguration config) { this.ozoneConfiguration = config; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java deleted file mode 100644 index 351163c0449..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/OzoneServiceProvider.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.OmUtils; - -import javax.annotation.PostConstruct; -import javax.enterprise.context.ApplicationScoped; -import javax.enterprise.inject.Produces; -import javax.inject.Inject; - -import java.util.Arrays; -import java.util.Collection; - -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_NODES_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; - -/** - * This class creates the OM service . - */ -@ApplicationScoped -public class OzoneServiceProvider { - - private String omServiceID; - - @Inject - private OzoneConfiguration conf; - - @PostConstruct - public void init() { - Collection serviceIdList = - conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY); - if (!serviceIdList.isEmpty()) { - // HA cluster. - //For now if multiple service id's are configured we throw exception. - // As if multiple service id's are configured, S3Gateway will not be - // knowing which one to talk to. In future, if OM federation is supported - // we can resolve this by having another property like - // ozone.om.internal.service.id. - // TODO: Revisit this later. - if (serviceIdList.size() > 1) { - throw new IllegalArgumentException("Multiple serviceIds are " + - "configured. " + Arrays.toString(serviceIdList.toArray())); - } else { - String serviceId = serviceIdList.iterator().next(); - Collection omNodeIds = OmUtils.getActiveOMNodeIds(conf, - serviceId); - if (omNodeIds.size() == 0) { - throw new IllegalArgumentException(OZONE_OM_NODES_KEY - + "." 
+ serviceId + " is not defined"); - } - omServiceID = serviceId; - } - } - } - - @Produces - public String getOmServiceID() { - return omServiceID; - } - -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java index 7259d851796..a257155e764 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java @@ -90,10 +90,10 @@ public void filter(ContainerRequestContext requestContext) throws if (host.length() > domain.length()) { String bucketName = host.substring(0, host.length() - domain.length()); - if(!bucketName.endsWith(".")) { + if (!bucketName.endsWith(".")) { //Checking this as the virtual host style pattern is http://bucket.host/ throw getException("Invalid S3 Gateway request {" + requestContext - .getUriInfo().getRequestUri().toString() +"}:" +" Host: {" + host + .getUriInfo().getRequestUri().toString() + "}:" + " Host: {" + host + " is in invalid format"); } else { bucketName = bucketName.substring(0, bucketName.length() - 1); @@ -134,7 +134,7 @@ public void setConfiguration(OzoneConfiguration config) { */ private String getDomainName(String host) { String match = null; - int length=0; + int length = 0; for (String domainVal : domains) { if (host.endsWith(domainVal)) { int len = domainVal.length(); @@ -148,7 +148,7 @@ private String getDomainName(String host) { } private String checkHostWithoutPort(String host) { - if (host.contains(":")){ + if (host.contains(":")) { return host.substring(0, host.lastIndexOf(":")); } else { return host; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index dbf8cf31e31..204c1a564fe 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -212,7 +212,7 @@ public Response get( if (count < maxKeys) { response.setTruncated(false); - } else if(ozoneKeyIterator.hasNext()) { + } else if (ozoneKeyIterator.hasNext()) { response.setTruncated(true); ContinueToken nextToken = new ContinueToken(lastKey, prevDir); response.setNextToken(nextToken.encodeToString()); @@ -241,12 +241,12 @@ public Response put(@PathParam("bucket") String bucketName, return Response.status(HttpStatus.SC_OK).header("Location", location) .build(); } catch (OMException exception) { - LOG.error("Error in Create Bucket Request for bucket: {}", bucketName, - exception); if (exception.getResult() == ResultCodes.INVALID_BUCKET_NAME) { throw S3ErrorTable.newError(S3ErrorTable.INVALID_BUCKET_NAME, bucketName); } + LOG.error("Error in Create Bucket Request for bucket: {}", bucketName, + exception); throw exception; } } @@ -291,12 +291,7 @@ public Response listMultipartUploads( @HEAD public Response head(@PathParam("bucket") String bucketName) throws OS3Exception, IOException { - try { - getBucket(bucketName); - } catch (OS3Exception ex) { - LOG.error("Exception occurred in headBucket", ex); - throw ex; - } + getBucket(bucketName); return Response.ok().build(); } @@ -503,12 +498,10 @@ public Response putAcl(String bucketName, HttpHeaders httpHeaders, } } // Add new permission on Volume - for(OzoneAcl acl : 
ozoneAclListOnVolume) { + for (OzoneAcl acl : ozoneAclListOnVolume) { volume.addAcl(acl); } } catch (OMException exception) { - LOG.error("Error in set ACL Request for bucket: {}", bucketName, - exception); if (exception.getResult() == ResultCodes.BUCKET_NOT_FOUND) { throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_BUCKET, bucketName); @@ -516,6 +509,8 @@ public Response putAcl(String bucketName, HttpHeaders httpHeaders, throw S3ErrorTable.newError(S3ErrorTable .ACCESS_DENIED, bucketName); } + LOG.error("Error in set ACL Request for bucket: {}", bucketName, + exception); throw exception; } return Response.status(HttpStatus.SC_OK).build(); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index fcb4e3425f1..248ee92cfdc 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -200,7 +200,7 @@ public Iterator listS3Buckets(String prefix, private Iterator iterateBuckets( Function> query) - throws IOException, OS3Exception{ + throws IOException, OS3Exception { try { return query.apply(getVolume()); } catch (OMException e) { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index d2a45b1776e..d6f46e087e1 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -208,23 +208,20 @@ public Response put( return Response.ok().status(HttpStatus.SC_OK) .build(); - } catch (IOException ex) { - LOG.error("Exception occurred in PutObject", ex); - if (ex instanceof OMException) { - if (((OMException) ex).getResult() == ResultCodes.NOT_A_FILE) { - OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, - keyPath); - os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + - "when calling the PutObject/MPU PartUpload operation: " + - OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are" + - " considered as Unix Paths. Path has Violated FS Semantics " + - "which caused put operation to fail."); - throw os3Exception; - } else if ((((OMException) ex).getResult() == - ResultCodes.PERMISSION_DENIED)) { - throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath); - } + } catch (OMException ex) { + if (ex.getResult() == ResultCodes.NOT_A_FILE) { + OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, + keyPath); + os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + + "when calling the PutObject/MPU PartUpload operation: " + + OZONE_OM_ENABLE_FILESYSTEM_PATHS + " is enabled Keys are" + + " considered as Unix Paths. 
Path has Violated FS Semantics " + + "which caused put operation to fail."); + throw os3Exception; + } else if (ex.getResult() == ResultCodes.PERMISSION_DENIED) { + throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, keyPath); } + LOG.error("Exception occurred in PutObject", ex); throw ex; } finally { if (output != null) { @@ -497,11 +494,11 @@ public Response initializeMultipartUpload( return Response.status(Status.OK).entity( multipartUploadInitiateResponse).build(); } catch (OMException ex) { - LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " + - "key: {}", bucket, key, ex); if (ex.getResult() == ResultCodes.PERMISSION_DENIED) { throw S3ErrorTable.newError(S3ErrorTable.ACCESS_DENIED, key); } + LOG.error("Error in Initiate Multipart Upload Request for bucket: {}, " + + "key: {}", bucket, key, ex); throw ex; } } @@ -544,8 +541,6 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, return Response.status(Status.OK).entity(completeMultipartUploadResponse) .build(); } catch (OMException ex) { - LOG.error("Error in Complete Multipart Upload Request for bucket: {}, " + - ", key: {}", bucket, key, ex); if (ex.getResult() == ResultCodes.INVALID_PART) { throw S3ErrorTable.newError(S3ErrorTable.INVALID_PART, key); } else if (ex.getResult() == ResultCodes.INVALID_PART_ORDER) { @@ -554,13 +549,13 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, throw S3ErrorTable.newError(NO_SUCH_UPLOAD, uploadID); } else if (ex.getResult() == ResultCodes.ENTITY_TOO_SMALL) { throw S3ErrorTable.newError(ENTITY_TOO_SMALL, key); - } else if(ex.getResult() == ResultCodes.INVALID_REQUEST) { + } else if (ex.getResult() == ResultCodes.INVALID_REQUEST) { OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key); os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: You must " + "specify at least one part"); throw os3Exception; - } else if(ex.getResult() == ResultCodes.NOT_A_FILE) { + } else if (ex.getResult() == ResultCodes.NOT_A_FILE) { OS3Exception os3Exception = S3ErrorTable.newError(INVALID_REQUEST, key); os3Exception.setErrorMessage("An error occurred (InvalidRequest) " + "when calling the CompleteMultipartUpload operation: " + @@ -569,6 +564,8 @@ public Response completeMultipartUpload(@PathParam("bucket") String bucket, "given KeyName caused failure for MPU"); throw os3Exception; } + LOG.error("Error in Complete Multipart Upload Request for bucket: {}, " + + ", key: {}", bucket, key, ex); throw ex; } } @@ -883,7 +880,7 @@ private static OptionalLong parseAndValidateDate(String ozoneDateStr) { } long currentDate = System.currentTimeMillis(); - if (ozoneDateInMs <= currentDate){ + if (ozoneDateInMs <= currentDate) { return OptionalLong.of(ozoneDateInMs); } else { // dates in the future are invalid, so return empty() diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java index c59c4d19663..792f2e2ef5e 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3Acl.java @@ -83,7 +83,7 @@ public String getValue() { public static ACLType getType(String typeStr) { - for(ACLType type: ACLType.values()) { + for (ACLType type: ACLType.values()) { if (type.getValue().equals(typeStr)) { return type; } @@ -139,7 +139,7 @@ 
boolean isSupported() { } public static ACLIdentityType getTypeFromGranteeType(String typeStr) { - for(ACLIdentityType type: ACLIdentityType.values()) { + for (ACLIdentityType type: ACLIdentityType.values()) { if (type.getGranteeType().equals(typeStr)) { return type; } @@ -148,7 +148,7 @@ public static ACLIdentityType getTypeFromGranteeType(String typeStr) { } public static ACLIdentityType getTypeFromHeaderType(String typeStr) { - for(ACLIdentityType type: ACLIdentityType.values()) { + for (ACLIdentityType type: ACLIdentityType.values()) { if (type.getHeaderType().equals(typeStr)) { return type; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java index 66f931fdef7..ee9e1a0ce33 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/S3BucketAcl.java @@ -169,10 +169,10 @@ public static class Grantee { @XmlElement(name = "ID") private String id; - @XmlAttribute(name="xsi:type") + @XmlAttribute(name = "xsi:type") private String xsiType = "CanonicalUser"; - @XmlAttribute(name="xmlns:xsi") + @XmlAttribute(name = "xmlns:xsi") private String xsiNs = "http://www.w3.org/2001/XMLSchema-instance"; public String getXsiNs() { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java index 86d9fc049cd..d36e81d4969 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/exception/S3ErrorTable.java @@ -26,7 +26,7 @@ import static java.net.HttpURLConnection.HTTP_NOT_FOUND; import static java.net.HttpURLConnection.HTTP_PRECON_FAILED; import static java.net.HttpURLConnection.HTTP_NOT_IMPLEMENTED; -import static java.net.HttpURLConnection.HTTP_SERVER_ERROR; +import static java.net.HttpURLConnection.HTTP_INTERNAL_ERROR; import static org.apache.hadoop.ozone.s3.util.S3Consts.RANGE_NOT_SATISFIABLE; /** @@ -65,7 +65,7 @@ private S3ErrorTable() { public static final OS3Exception MALFORMED_HEADER = new OS3Exception( "AuthorizationHeaderMalformed", "The authorization header you provided " + - "is invalid.", HTTP_NOT_FOUND); + "is invalid.", HTTP_BAD_REQUEST); public static final OS3Exception NO_SUCH_KEY = new OS3Exception( "NoSuchKey", "The specified key does not exist", HTTP_NOT_FOUND); @@ -106,7 +106,7 @@ private S3ErrorTable() { public static final OS3Exception INTERNAL_ERROR = new OS3Exception( "InternalError", "We encountered an internal error. 
Please try again.", - HTTP_SERVER_ERROR); + HTTP_INTERNAL_ERROR); public static final OS3Exception ACCESS_DENIED = new OS3Exception( "AccessDenied", "User doesn't have the right to access this " + @@ -130,7 +130,11 @@ public static OS3Exception newError(OS3Exception e, String resource) { OS3Exception err = new OS3Exception(e.getCode(), e.getErrorMessage(), e.getHttpCode()); err.setResource(resource); - LOG.error(err.toXml(), e); + if (e.getHttpCode() == HTTP_INTERNAL_ERROR) { + LOG.error("Internal Error: {}", err.toXml(), e); + } else if (LOG.isDebugEnabled()) { + LOG.debug(err.toXml(), e); + } return err; } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java index 14bf2a23cf4..1783b587311 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/signature/Credential.java @@ -71,7 +71,7 @@ public void parseCredential() throws OS3Exception { case 6: // Access id is kerberos principal. // Ex: testuser/om@EXAMPLE.COM/20190321/us-west-1/s3/aws4_request - accessKeyID = split[0] + "/" +split[1]; + accessKeyID = split[0] + "/" + split[1]; date = split[2].trim(); awsRegion = split[3].trim(); awsService = split[4].trim(); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index cf0d4c325dd..6a0d4287256 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -87,7 +87,7 @@ public OzoneBucketStub( @Override public OzoneOutputStream createKey(String key, long size) throws IOException { - return createKey(key, size, ReplicationType.STAND_ALONE, + return createKey(key, size, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java index 4c17ecc95fc..a158e0212e0 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/protocolPB/TestGrpcOmTransport.java @@ -30,6 +30,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_TRANSPORT_CLASS; + /** * Tests for GrpcOmTransport. 
*/ @@ -44,7 +46,10 @@ public class TestGrpcOmTransport { @Test public void testGrpcOmTransportFactory() throws Exception { String omServiceId = ""; + String transportCls = GrpcOmTransport.class.getName(); OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(OZONE_OM_TRANSPORT_CLASS, + transportCls); UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); OmTransport omTransport = OmTransportFactory.create(conf, ugi, omServiceId); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java index d29136413c7..d02c3ccde8b 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestOzoneClientProducer.java @@ -17,19 +17,37 @@ */ package org.apache.hadoop.ozone.s3; +import javax.ws.rs.WebApplicationException; +import javax.ws.rs.container.ContainerRequestContext; +import javax.ws.rs.core.MultivaluedHashMap; +import javax.ws.rs.core.MultivaluedMap; +import javax.ws.rs.core.UriInfo; import java.io.IOException; +import java.net.URI; import java.util.Arrays; import java.util.Collection; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.s3.signature.AWSSignatureProcessor; +import static java.net.HttpURLConnection.HTTP_BAD_REQUEST; +import static java.net.HttpURLConnection.HTTP_FORBIDDEN; + +import static org.apache.hadoop.ozone.s3.signature.SignatureParser.AUTHORIZATION_HEADER; +import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.CONTENT_MD5; +import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.CONTENT_TYPE; +import static org.apache.hadoop.ozone.s3.signature.SignatureProcessor.HOST_HEADER; +import static org.apache.hadoop.ozone.s3.signature.StringToSignProducer.X_AMAZ_DATE; +import static org.apache.hadoop.ozone.s3.signature.StringToSignProducer.X_AMZ_CONTENT_SHA256; import static org.junit.Assert.fail; + import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import org.mockito.Mockito; /** * Test class for @{@link OzoneClientProducer}. 
@@ -38,16 +56,37 @@ public class TestOzoneClientProducer { private OzoneClientProducer producer; + private MultivaluedMap headerMap; + private MultivaluedMap queryMap; + private String authHeader; + private String contentMd5; + private String host; + private String amzContentSha256; + private String date; + private String contentType; + private ContainerRequestContext context; + private UriInfo uriInfo; public TestOzoneClientProducer( String authHeader, String contentMd5, String host, String amzContentSha256, String date, String contentType ) throws Exception { + this.authHeader = authHeader; + this.contentMd5 = contentMd5; + this.host = host; + this.amzContentSha256 = amzContentSha256; + this.date = date; + this.contentType = contentType; producer = new OzoneClientProducer(); + headerMap = new MultivaluedHashMap<>(); + queryMap = new MultivaluedHashMap<>(); + uriInfo = Mockito.mock(UriInfo.class); + context = Mockito.mock(ContainerRequestContext.class); OzoneConfiguration config = new OzoneConfiguration(); config.setBoolean(OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY, true); config.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, ""); + setupContext(); producer.setOzoneConfiguration(config); } @@ -83,7 +122,10 @@ public static Collection data() { }, { null, null, null, null, null, null - } + }, + { + "", null, null, null, null, null + }, }); } @@ -97,4 +139,91 @@ public void testGetClientFailure() { } } + @Test + public void testGetSignature() { + try { + System.err.println("Testing: " + authHeader); + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, "ozone1"); + configuration.set(OMConfigKeys.OZONE_OM_ADDRESS_KEY, "ozone1addr:9399"); + producer.setOzoneConfiguration(configuration); + producer.getSignature(); + if ("".equals(authHeader)) { + fail("Empty AuthHeader must fail"); + } + } catch (WebApplicationException ex) { + if (authHeader == null || authHeader.equals("")) { + // Empty auth header should be 403 + Assert.assertEquals(HTTP_FORBIDDEN, ex.getResponse().getStatus()); + // TODO: Should return XML in body like this (bot not for now): + // + // AccessDeniedAccess Denied + // ...... + // + } else { + // Other requests have stale timestamp and thus should fail + Assert.assertEquals(HTTP_BAD_REQUEST, ex.getResponse().getStatus()); + } + } catch (Exception ex) { + fail("Unexpected exception: " + ex); + } + } + + @Test + public void testGetClientFailureWithMultipleServiceIds() { + try { + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, "ozone1,ozone2"); + producer.setOzoneConfiguration(configuration); + producer.createClient(); + fail("testGetClientFailureWithMultipleServiceIds"); + } catch (Exception ex) { + Assert.assertTrue(ex instanceof IOException); + Assert.assertTrue(ex.getMessage().contains( + "More than 1 OzoneManager ServiceID")); + } + } + + @Test + public void testGetClientFailureWithMultipleServiceIdsAndInternalServiceId() { + try { + OzoneConfiguration configuration = new OzoneConfiguration(); + configuration.set(OMConfigKeys.OZONE_OM_INTERNAL_SERVICE_ID, "ozone1"); + configuration.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, "ozone1,ozone2"); + producer.setOzoneConfiguration(configuration); + producer.createClient(); + fail("testGetClientFailureWithMultipleServiceIdsAndInternalServiceId"); + } catch (Exception ex) { + Assert.assertTrue(ex instanceof IOException); + // Still test will fail, as config is not complete. 
But it should pass + // the service id check. + Assert.assertFalse(ex.getMessage().contains( + "More than 1 OzoneManager ServiceID")); + } + } + + private void setupContext() throws Exception { + headerMap.putSingle(AUTHORIZATION_HEADER, authHeader); + headerMap.putSingle(CONTENT_MD5, contentMd5); + headerMap.putSingle(HOST_HEADER, host); + headerMap.putSingle(X_AMZ_CONTENT_SHA256, amzContentSha256); + headerMap.putSingle(X_AMAZ_DATE, date); + headerMap.putSingle(CONTENT_TYPE, contentType); + + Mockito.when(uriInfo.getQueryParameters()).thenReturn(queryMap); + Mockito.when(uriInfo.getRequestUri()).thenReturn(new URI("")); + + Mockito.when(context.getUriInfo()).thenReturn(uriInfo); + Mockito.when(context.getHeaders()).thenReturn(headerMap); + Mockito.when(context.getHeaderString(AUTHORIZATION_HEADER)) + .thenReturn(authHeader); + Mockito.when(context.getUriInfo().getQueryParameters()) + .thenReturn(queryMap); + + AWSSignatureProcessor awsSignatureProcessor = new AWSSignatureProcessor(); + awsSignatureProcessor.setContext(context); + + producer.setSignatureParser(awsSignatureProcessor); + } + } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java index 2a46c55f140..19d9380de91 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java @@ -66,7 +66,7 @@ public ContainerRequest createContainerRequest(String host, String path, virtualHostStyleUri = new URI("http://" + s3HttpAddr); } else if (path != null && queryParams == null) { virtualHostStyleUri = new URI("http://" + s3HttpAddr + path); - } else if (path !=null && queryParams != null) { + } else if (path != null && queryParams != null) { virtualHostStyleUri = new URI("http://" + s3HttpAddr + path + queryParams); } else { diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java index 6666da715f5..df4b5f43d91 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/commontypes/TestObjectKeyNameAdapter.java @@ -37,7 +37,7 @@ public void testEncodeResult() throws Exception { getAdapter().marshal("a+b+c/")); } - private XmlAdapter getAdapter(){ + private XmlAdapter getAdapter() { return (new ObjectKeyNameAdapter()); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java index 66c74562869..3c9a17cb5f7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java @@ -69,7 +69,7 @@ public void testHeadObject() throws Exception { //GIVEN String value = RandomStringUtils.randomAlphanumeric(32); OzoneOutputStream out = bucket.createKey("key1", - value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE, + value.getBytes(UTF_8).length, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); out.write(value.getBytes(UTF_8)); out.close(); diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java index 19ab3bfb7e8..f5e4a061571 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java @@ -57,7 +57,7 @@ public void testListBucket() throws Exception { assertEquals(0, response.getBucketsNum()); String bucketBaseName = "bucket-" + getClass().getName(); - for(int i = 0; i < 10; i++) { + for (int i = 0; i < 10; i++) { clientStub.getObjectStore().createS3Bucket(bucketBaseName + i); } response = (ListBucketResponse) rootEndpoint.get().getEntity(); diff --git a/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml index 76127b7f06f..d263a069b6d 100644 --- a/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml +++ b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml @@ -13,10 +13,6 @@ limitations under the License. See accompanying LICENSE file. --> - - - - diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 33535496147..47501795307 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -42,11 +42,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test - - - org.apache.ozone - hdds-server-scm - org.apache.ozone hdds-tools @@ -93,16 +88,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> javax.activation activation - - org.openjdk.jmh - jmh-core - provided - - - org.openjdk.jmh - jmh-generator-annprocess - provided - io.dropwizard.metrics metrics-core diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java index de0bd7eedd4..55be795e271 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/FinalizeUpgradeSubCommand.java @@ -88,11 +88,11 @@ public Void call() throws Exception { try { UpgradeFinalizer.StatusAndMessages finalizationResponse = client.finalizeUpgrade(upgradeClientID); - if (isFinalized(finalizationResponse.status())){ + if (isFinalized(finalizationResponse.status())) { System.out.println("Upgrade has already been finalized."); emitExitMsg(); return null; - } else if (!isStarting(finalizationResponse.status())){ + } else if (!isStarting(finalizationResponse.status())) { System.err.println("Invalid response from Ozone Manager."); System.err.println( "Current finalization status is: " + finalizationResponse.status() @@ -116,7 +116,7 @@ private void monitorAndWaitFinalization(OzoneManagerProtocol client, emitFinishedMsg("Ozone Manager"); } catch (CancellationException e) { emitCancellationMsg("Ozone Manager"); - } catch (InterruptedException e){ + } catch (InterruptedException e) { emitCancellationMsg("Ozone Manager"); Thread.currentThread().interrupt(); } catch (ExecutionException e) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java index fd354f76aba..8e46485218b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java +++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeScmUpgradeSubcommand.java @@ -72,11 +72,11 @@ public void execute(ScmClient scmClient) throws IOException { try { StatusAndMessages finalizationResponse = scmClient.finalizeScmUpgrade(upgradeClientID); - if (isFinalized(finalizationResponse.status())){ + if (isFinalized(finalizationResponse.status())) { System.out.println("Upgrade has already been finalized."); emitExitMsg(); return; - } else if (!isStarting(finalizationResponse.status())){ + } else if (!isStarting(finalizationResponse.status())) { System.err.println("Invalid response from Storage Container Manager."); System.err.println( "Current finalization status is: " + finalizationResponse.status() @@ -101,7 +101,7 @@ private void monitorAndWaitFinalization(ScmClient client, emitFinishedMsg("Storage Container Manager"); } catch (CancellationException e) { emitCancellationMsg("Storage Container Manager"); - } catch (InterruptedException e){ + } catch (InterruptedException e) { emitCancellationMsg("Storage Container Manager"); Thread.currentThread().interrupt(); } catch (ExecutionException e) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeUpgradeCommandUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeUpgradeCommandUtil.java index 539ac286065..308b90036a6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeUpgradeCommandUtil.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/FinalizeUpgradeCommandUtil.java @@ -80,13 +80,13 @@ public static void emitGeneralErrorMsg() { } public static void emitFinishedMsg(String component) { - System.out.println("Finalization of " + component +"'s metadata upgrade " + System.out.println("Finalization of " + component + "'s metadata upgrade " + "finished."); } public static void emitCancellationMsg(String component) { System.err.println("Finalization command was cancelled. Note that, this" - + "will not cancel finalization in " + component +". Progress can be" + + "will not cancel finalization in " + component + ". 
Progress can be" + "monitored in the Ozone Manager's log."); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java index ae64c94ed2d..9af8a7447c5 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/AuditParser.java @@ -54,7 +54,7 @@ public static void main(String[] argv) throws Exception { new AuditParser().run(argv); } - public String getDatabase(){ + public String getDatabase() { return database; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java index 8750e19bc11..725b2b89fbc 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/common/DatabaseHelper.java @@ -70,7 +70,7 @@ private static Connection getConnection(String dbName) throws Exception { private static void loadProperties() { Properties props = new Properties(); - try{ + try { InputStream inputStream = DatabaseHelper.class.getClassLoader() .getResourceAsStream(ParserConsts.PROPS_FILE); if (inputStream != null) { @@ -85,7 +85,7 @@ private static void loadProperties() { throw new FileNotFoundException("property file '" + ParserConsts.PROPS_FILE + "' not found in the classpath"); } - } catch (Exception e){ + } catch (Exception e) { LOG.error(e.getMessage()); } @@ -145,14 +145,14 @@ private static ArrayList parseAuditLogs(String filePath) AuditEntry tempEntry = null; while (true) { - if (tempEntry == null){ + if (tempEntry == null) { tempEntry = new AuditEntry(); } if (currentLine == null) { break; } else { - if (!currentLine.matches(ParserConsts.DATE_REGEX)){ + if (!currentLine.matches(ParserConsts.DATE_REGEX)) { tempEntry.appendException(currentLine); } else { entry = StringUtils.stripAll(currentLine.split("\\|")); @@ -168,11 +168,11 @@ private static ArrayList parseAuditLogs(String filePath) .setParams(ops[1]) .setResult(entry[6].substring(entry[6].indexOf('=') + 1)) .build(); - if (entry.length == 8){ + if (entry.length == 8) { tempEntry.setException(entry[7]); } } - if (nextLine == null || nextLine.matches(ParserConsts.DATE_REGEX)){ + if (nextLine == null || nextLine.matches(ParserConsts.DATE_REGEX)) { listResult.add(tempEntry); tempEntry = null; } @@ -205,8 +205,8 @@ private static String executeStatement(String dbName, String sql) if (rs != null) { rsm = rs.getMetaData(); int cols = rsm.getColumnCount(); - while (rs.next()){ - for (int index = 1; index <= cols; index++){ + while (rs.next()) { + for (int index = 1; index <= cols; index++) { result.append(rs.getObject(index)); result.append("\t"); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java index 5d585596fbe..f3f8c459f45 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/handler/TemplateCommandHandler.java @@ -53,7 +53,7 @@ public class TemplateCommandHandler implements Callable { @Override public Void call() 
throws Exception { try { - if(DatabaseHelper.validateTemplate(template)) { + if (DatabaseHelper.validateTemplate(template)) { System.out.println( DatabaseHelper.executeTemplate(auditParser.getDatabase(), template) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java index c6b0b337a76..035bf26ebd0 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/audit/parser/model/AuditEntry.java @@ -30,7 +30,7 @@ public class AuditEntry { private String result; private String exception; - public AuditEntry(){} + public AuditEntry() { } public String getUser() { return user; @@ -104,7 +104,7 @@ public void setException(String exception) { this.exception = exception.trim(); } - public void appendException(String text){ + public void appendException(String text) { this.exception += "\n" + text.trim(); } @@ -126,47 +126,47 @@ public Builder() { } - public Builder setTimestamp(String ts){ + public Builder setTimestamp(String ts) { this.timestamp = ts; return this; } - public Builder setLevel(String lvl){ + public Builder setLevel(String lvl) { this.level = lvl; return this; } - public Builder setLogger(String lgr){ + public Builder setLogger(String lgr) { this.logger = lgr; return this; } - public Builder setUser(String usr){ + public Builder setUser(String usr) { this.user = usr; return this; } - public Builder setIp(String ipAddress){ + public Builder setIp(String ipAddress) { this.ip = ipAddress; return this; } - public Builder setOp(String operation){ + public Builder setOp(String operation) { this.op = operation; return this; } - public Builder setParams(String prms){ + public Builder setParams(String prms) { this.params = prms; return this; } - public Builder setResult(String res){ + public Builder setResult(String res) { this.result = res; return this; } - public Builder setException(String exp){ + public Builder setException(String exp) { this.exception = exp; return this; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java index a5ed310433e..24c8f243e88 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ChunkKeyHandler.java @@ -44,7 +44,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -77,7 +77,7 @@ private String getChunkLocationPath(String containerLocation) { @Override protected void execute(OzoneClient client, OzoneAddress address) - throws IOException, OzoneClientException{ + throws IOException, OzoneClientException { containerOperationClient = new ContainerOperationClient(createOzoneConfiguration()); xceiverClientManager = containerOperationClient @@ -105,11 +105,11 @@ protected void execute(OzoneClient client, OzoneAddress address) List 
locationInfos = keyInfo .getLatestVersionLocations().getBlocksLatestVersionOnly(); // for zero-sized key - if(locationInfos.isEmpty()){ + if (locationInfos.isEmpty()) { System.out.println("No Key Locations Found"); return; } - ChunkLayOutVersion chunkLayOutVersion = ChunkLayOutVersion + ContainerLayoutVersion containerLayoutVersion = ContainerLayoutVersion .getConfiguredVersion(getConf()); JsonArray responseArrayList = new JsonArray(); for (OmKeyLocationInfo keyLocation:locationInfos) { @@ -142,7 +142,7 @@ protected void execute(OzoneClient client, OzoneAddress address) for (Map.Entry entry: responses.entrySet()) { JsonObject jsonObj = new JsonObject(); - if(entry.getValue() == null){ + if (entry.getValue() == null) { LOG.error("Cant execute getBlock on this node"); continue; } @@ -152,7 +152,7 @@ protected void execute(OzoneClient client, OzoneAddress address) keyLocation.getContainerID(), keyLocation.getPipeline()); for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) { - String fileName = chunkLayOutVersion.getChunkFile(new File( + String fileName = containerLayoutVersion.getChunkFile(new File( getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), ChunkInfo.getFromProtoBuf(chunkInfo)).toString(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java index 2ecfa138a57..275908e6414 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DBScanner.java @@ -177,7 +177,7 @@ private static ColumnFamilyHandle getColumnFamilyHandle( } private void constructColumnFamilyMap(DBDefinition dbDefinition) { - if (dbDefinition == null){ + if (dbDefinition == null) { System.out.println("Incorrect Db Path"); return; } @@ -217,7 +217,7 @@ private void printAppropriateTable( DBDefinitionFactory.setDnDBSchemaVersion(dnDBSchemaVersion); this.constructColumnFamilyMap(DBDefinitionFactory. 
getDefinition(Paths.get(dbPath))); - if (this.columnFamilyMap !=null) { + if (this.columnFamilyMap != null) { if (!this.columnFamilyMap.containsKey(tableName)) { System.out.print("Table with name:" + tableName + " does not exist"); } else { @@ -239,8 +239,8 @@ private void printAppropriateTable( } private String removeTrailingSlashIfNeeded(String dbPath) { - if(dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)){ - dbPath = dbPath.substring(0, dbPath.length()-1); + if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) { + dbPath = dbPath.substring(0, dbPath.length() - 1); } return dbPath; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java index cabddf97720..0ebc8324562 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java @@ -170,7 +170,7 @@ public void parse(String vol, String buck, String db, dumpInfo(Types.BUCKET, effectivePath, objectBucketId, bucketKey); Iterator pathIterator = p.iterator(); - while(pathIterator.hasNext()) { + while (pathIterator.hasNext()) { Path elem = pathIterator.next(); String path = metadataManager.getOzonePathKey(lastObjectId, elem.toString()); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java new file mode 100644 index 00000000000..c1ce49ddb32 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java @@ -0,0 +1,247 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.debug; + +import com.google.gson.*; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.client.protocol.ClientProtocol; +import org.apache.hadoop.ozone.client.rpc.RpcClient; +import org.apache.hadoop.ozone.common.OzoneChecksumException; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.shell.OzoneAddress; +import org.apache.hadoop.ozone.shell.keys.KeyHandler; +import org.apache.ratis.thirdparty.io.grpc.StatusRuntimeException; +import org.jetbrains.annotations.NotNull; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import java.io.File; +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.StandardCopyOption; +import java.text.SimpleDateFormat; +import java.util.Date; +import java.util.Map; + +/** + * Class that downloads every replica for all the blocks associated with a + * given key. It also generates a manifest file with information about the + * downloaded replicas. 
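+ * <p>
+ * Illustrative usage (the key address and output path below are examples only;
+ * the subcommand is registered under {@code ozone debug}):
+ * {@code ozone debug read-replicas --outputDir /tmp/replicas /vol1/bucket1/key1}
+ * <p>
+ * Replicas are written as {@code <key>_block<index>_<hostname>} files next to a
+ * {@code <key>_manifest} JSON file, inside a timestamped directory created
+ * under the output directory.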
+ */ +@CommandLine.Command(name = "read-replicas", + description = "Reads every replica for all the blocks associated with a " + + "given key.") +@MetaInfServices(SubcommandWithParent.class) +public class ReadReplicas extends KeyHandler implements SubcommandWithParent { + + @CommandLine.Option(names = {"--outputDir", "-o"}, + description = "Destination where the directory will be created" + + " for the downloaded replicas and the manifest file.", + defaultValue = "/opt/hadoop") + private String outputDir; + + private static final String JSON_PROPERTY_FILE_NAME = "filename"; + private static final String JSON_PROPERTY_FILE_SIZE = "datasize"; + private static final String JSON_PROPERTY_FILE_BLOCKS = "blocks"; + private static final String JSON_PROPERTY_BLOCK_INDEX = "blockIndex"; + private static final String JSON_PROPERTY_BLOCK_CONTAINERID = "containerId"; + private static final String JSON_PROPERTY_BLOCK_LOCALID = "localId"; + private static final String JSON_PROPERTY_BLOCK_LENGTH = "length"; + private static final String JSON_PROPERTY_BLOCK_OFFSET = "offset"; + private static final String JSON_PROPERTY_BLOCK_REPLICAS = "replicas"; + private static final String JSON_PROPERTY_REPLICA_HOSTNAME = "hostname"; + private static final String JSON_PROPERTY_REPLICA_UUID = "uuid"; + private static final String JSON_PROPERTY_REPLICA_EXCEPTION = "exception"; + + private ClientProtocol clientProtocol; + private ClientProtocol clientProtocolWithoutChecksum; + + @Override + public Class getParentType() { + return OzoneDebug.class; + } + + @Override + protected void execute(OzoneClient client, OzoneAddress address) + throws IOException, OzoneClientException { + + boolean isChecksumVerifyEnabled + = getConf().getBoolean("ozone.client.verify.checksum", true); + OzoneConfiguration configuration = new OzoneConfiguration(getConf()); + configuration.setBoolean("ozone.client.verify.checksum", + !isChecksumVerifyEnabled); + + if (isChecksumVerifyEnabled) { + clientProtocol = client.getObjectStore().getClientProxy(); + clientProtocolWithoutChecksum = new RpcClient(configuration, null); + } else { + clientProtocol = new RpcClient(configuration, null); + clientProtocolWithoutChecksum = client.getObjectStore().getClientProxy(); + } + + address.ensureKeyAddress(); + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + String keyName = address.getKeyName(); + + String directoryName = createDirectory(volumeName, bucketName, keyName); + + OzoneKeyDetails keyInfoDetails + = clientProtocol.getKeyDetails(volumeName, bucketName, keyName); + + Map> replicas + = clientProtocol.getKeysEveryReplicas(volumeName, bucketName, keyName); + + Map> + replicasWithoutChecksum = clientProtocolWithoutChecksum + .getKeysEveryReplicas(volumeName, bucketName, keyName); + + JsonObject result = new JsonObject(); + result.addProperty(JSON_PROPERTY_FILE_NAME, + volumeName + "/" + bucketName + "/" + keyName); + result.addProperty(JSON_PROPERTY_FILE_SIZE, keyInfoDetails.getDataSize()); + + JsonArray blocks = new JsonArray(); + downloadReplicasAndCreateManifest(keyName, replicas, + replicasWithoutChecksum, directoryName, blocks); + result.add(JSON_PROPERTY_FILE_BLOCKS, blocks); + + Gson gson = new GsonBuilder().setPrettyPrinting().create(); + String prettyJson = gson.toJson(result); + + String manifestFileName = keyName + "_manifest"; + System.out.println("Writing manifest file : " + manifestFileName); + File manifestFile + = new File(outputDir + "/" + directoryName + "/" + manifestFileName); + 
Files.write(manifestFile.toPath(), + prettyJson.getBytes(StandardCharsets.UTF_8)); + } + + private void downloadReplicasAndCreateManifest( + String keyName, + Map> replicas, + Map> + replicasWithoutChecksum, + String directoryName, JsonArray blocks) throws IOException { + int blockIndex = 0; + + for (Map.Entry> + block : replicas.entrySet()) { + JsonObject blockJson = new JsonObject(); + JsonArray replicasJson = new JsonArray(); + + blockIndex += 1; + blockJson.addProperty(JSON_PROPERTY_BLOCK_INDEX, blockIndex); + blockJson.addProperty(JSON_PROPERTY_BLOCK_CONTAINERID, + block.getKey().getContainerID()); + blockJson.addProperty(JSON_PROPERTY_BLOCK_LOCALID, + block.getKey().getLocalID()); + blockJson.addProperty(JSON_PROPERTY_BLOCK_LENGTH, + block.getKey().getLength()); + blockJson.addProperty(JSON_PROPERTY_BLOCK_OFFSET, + block.getKey().getOffset()); + + for (Map.Entry + replica : block.getValue().entrySet()) { + JsonObject replicaJson = new JsonObject(); + + replicaJson.addProperty(JSON_PROPERTY_REPLICA_HOSTNAME, + replica.getKey().getHostName()); + replicaJson.addProperty(JSON_PROPERTY_REPLICA_UUID, + replica.getKey().getUuidString()); + + OzoneInputStream is = replica.getValue(); + String fileName = keyName + "_block" + blockIndex + "_" + + replica.getKey().getHostName(); + System.out.println("Writing : " + fileName); + File replicaFile + = new File(outputDir + "/" + directoryName + "/" + fileName); + + try { + Files.copy(is, replicaFile.toPath(), + StandardCopyOption.REPLACE_EXISTING); + } catch (IOException e) { + Throwable cause = e.getCause(); + replicaJson.addProperty(JSON_PROPERTY_REPLICA_EXCEPTION, + e.getMessage()); + if (cause instanceof OzoneChecksumException) { + BlockID blockID = block.getKey().getBlockID(); + String datanodeUUID = replica.getKey().getUuidString(); + is = getInputStreamWithoutChecksum(replicasWithoutChecksum, + datanodeUUID, blockID); + Files.copy(is, replicaFile.toPath(), + StandardCopyOption.REPLACE_EXISTING); + } else if (cause instanceof StatusRuntimeException) { + break; + } + } finally { + is.close(); + } + replicasJson.add(replicaJson); + } + blockJson.add(JSON_PROPERTY_BLOCK_REPLICAS, replicasJson); + blocks.add(blockJson); + } + } + + private OzoneInputStream getInputStreamWithoutChecksum( + Map> + replicasWithoutChecksum, String datanodeUUID, BlockID blockID) { + OzoneInputStream is = new OzoneInputStream(); + for (Map.Entry> + block : replicasWithoutChecksum.entrySet()) { + if (block.getKey().getBlockID().equals(blockID)) { + for (Map.Entry + replica : block.getValue().entrySet()) { + if (replica.getKey().getUuidString().equals(datanodeUUID)) { + is = replica.getValue(); + } + } + } + } + return is; + } + + @NotNull + private String createDirectory(String volumeName, String bucketName, + String keyName) throws IOException { + String fileSuffix + = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date()); + String directoryName = volumeName + "_" + bucketName + "_" + keyName + + "_" + fileSuffix; + System.out.println("Creating directory : " + directoryName); + File dir = new File(outputDir + "/" + directoryName); + if (!dir.exists()) { + if (dir.mkdir()) { + System.out.println("Successfully created!"); + } else { + throw new IOException(String.format( + "Failed to create directory %s.", dir)); + } + } + return directoryName; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java index 0a639ec148c..a7b330ce796 
100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java @@ -309,7 +309,7 @@ public void printReport() { /** * Print out reports with the given message. */ - public void print(String msg){ + public void print(String msg) { Consumer print = freonCommand.isInteractive() ? System.out::println : LOG::info; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ChunkManagerDiskWrite.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ChunkManagerDiskWrite.java index e95747cf327..3efc08c4792 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ChunkManagerDiskWrite.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ChunkManagerDiskWrite.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage; @@ -80,7 +80,7 @@ public class ChunkManagerDiskWrite extends BaseFreonGenerator implements description = "Strategy to layout files in the container", defaultValue = "FILE_PER_CHUNK" ) - private ChunkLayOutVersion chunkLayout; + private ContainerLayoutVersion containerLayout; private ChunkManager chunkManager; @@ -120,7 +120,7 @@ public Void call() throws Exception { KeyValueContainerData keyValueContainerData = new KeyValueContainerData(containerId, - chunkLayout, + containerLayout, 1_000_000L, getPrefix(), "nodeid"); @@ -143,7 +143,7 @@ public Void call() throws Exception { LOG.info("Running chunk write test: threads={} chunkSize={} " + "chunksPerBlock={} layout={}", - threadCount, chunkSize, chunksPerBlock, chunkLayout); + threadCount, chunkSize, chunksPerBlock, containerLayout); runTests(this::writeChunk); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java index e774fcdccd8..9e73bfb637e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java @@ -136,7 +136,7 @@ public Void call() throws Exception { } else { xceiverClients = new ArrayList<>(); pipelines = new HashSet<>(); - for(String pipelineId:pipelinesFromCmd){ + for (String pipelineId:pipelinesFromCmd) { List selectedPipelines = pipelinesFromSCM.stream() .filter((p -> p.getId().toString() .equals("PipelineID=" + pipelineId) @@ -144,11 +144,11 @@ public Void call() throws Exception { .collect(Collectors.toList()); pipelines.addAll(selectedPipelines); } - for (Pipeline p:pipelines){ + for (Pipeline p:pipelines) { LOG.info("Writing to pipeline: " + p.getId()); xceiverClients.add(xceiverClientManager.acquireClient(p)); } - if (pipelines.isEmpty()){ + if (pipelines.isEmpty()) { throw new IllegalArgumentException( "Couldn't find the any/the selected pipeline"); } @@ -166,8 
+166,8 @@ public Void call() throws Exception { private boolean pipelineContainsDatanode(Pipeline p, List datanodeHosts) { - for (DatanodeDetails dn:p.getNodes()){ - if (datanodeHosts.contains(dn.getHostName())){ + for (DatanodeDetails dn:p.getNodes()) { + if (datanodeHosts.contains(dn.getHostName())) { return true; } } @@ -219,7 +219,7 @@ private void writeChunk(long stepNo) .setData(dataToWrite); XceiverClientSpi clientSpi = xceiverClients.get( - (int) (stepNo%(xceiverClients.size()))); + (int) (stepNo % (xceiverClients.size()))); sendWriteChunkRequest(blockId, writeChunkRequest, clientSpi); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java index b0937d0ba61..88ec44d914c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java @@ -175,7 +175,7 @@ private void createSubDirRecursively(String parent, int depthIndex, } } - while(spanIndex < span) { + while (spanIndex < span) { String levelSubDir = makeDirWithGivenNumberOfFiles(parent); ++spanIndex; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java index e1672187973..02fa7e6373e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java @@ -49,7 +49,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; +import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; @@ -316,8 +316,8 @@ private void generatedRandomData(SplittableRandom random, byte[] data) { private KeyValueContainer createContainer(long containerId) throws IOException { - ChunkLayOutVersion layoutVersion = - ChunkLayOutVersion.getConfiguredVersion(config); + ContainerLayoutVersion layoutVersion = + ContainerLayoutVersion.getConfiguredVersion(config); KeyValueContainerData keyValueContainerData = new KeyValueContainerData(containerId, layoutVersion, getContainerSize(config), diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java index c5d4d156c8b..cfdc924486a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java @@ -163,7 +163,7 @@ public static void generateConfigurations(String path, generatedConfig.setProperties(requiredProperties); File output = new File(path, "ozone-site.xml"); - 
if(output.createNewFile()){ + if (output.createNewFile()) { JAXBContext context = JAXBContext.newInstance(OzoneConfiguration.XMLConfiguration.class); Marshaller m = context.createMarshaller(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCBatch.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCBatch.java deleted file mode 100644 index 69120dae7ac..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCBatch.java +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.genesis; - -import java.nio.ByteBuffer; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.util.NativeCRC32Wrapper; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Level; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.Param; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Threads; -import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.infra.Blackhole; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; - -/** - * Class to benchmark hadoop native CRC implementations in batch node. - * - * The hadoop native libraries must be available to run this test. libhadoop.so - * is not currently bundled with Ozone, so it needs to be obtained from a Hadoop - * build and the test needs to be executed on a compatible OS (ie Linux x86): - * - * ozone --jvmargs -Djava.library.path=/home/sodonnell/native genesis -b - * BenchmarkCRCBatch - */ -public class BenchMarkCRCBatch { - - private static int dataSize = 64 * 1024 * 1024; - - /** - * Benchmark state. 
- */ - @State(Scope.Thread) - public static class BenchmarkState { - - private final ByteBuffer data = ByteBuffer.allocate(dataSize); - - @Param({"512", "1024", "2048", "4096", "32768", "1048576"}) - private int checksumSize; - - @Param({"nativeCRC32", "nativeCRC32C"}) - private String crcImpl; - - private byte[] checksumBuffer; - private int nativeChecksumType = 1; - - public ByteBuffer data() { - return data; - } - - public int checksumSize() { - return checksumSize; - } - - public String crcImpl() { - return crcImpl; - } - - @edu.umd.cs.findbugs.annotations.SuppressFBWarnings( - value="EI_EXPOSE_REP", - justification="The intent is to expose this variable") - public byte[] checksumBuffer() { - return checksumBuffer; - } - - public int nativeChecksumType() { - return nativeChecksumType; - } - - @Setup(Level.Trial) - public void setUp() { - switch (crcImpl) { - case "nativeCRC32": - if (NativeCRC32Wrapper.isAvailable()) { - nativeChecksumType = NativeCRC32Wrapper.CHECKSUM_CRC32; - checksumBuffer = new byte[4 * dataSize / checksumSize]; - } else { - throw new RuntimeException("Native library is not available"); - } - break; - case "nativeCRC32C": - if (NativeCRC32Wrapper.isAvailable()) { - nativeChecksumType = NativeCRC32Wrapper.CHECKSUM_CRC32C; - checksumBuffer = new byte[4 * dataSize / checksumSize]; - } else { - throw new RuntimeException("Native library is not available"); - } - break; - default: - } - data.put(RandomUtils.nextBytes(data.remaining())); - } - } - - @Benchmark - @Threads(1) - @Warmup(iterations = 3, time = 1000, timeUnit = MILLISECONDS) - @Fork(value = 1, warmups = 0) - @Measurement(iterations = 5, time = 2000, timeUnit = MILLISECONDS) - @BenchmarkMode(Mode.Throughput) - public void runCRCNativeBatch(Blackhole blackhole, BenchmarkState state) { - if (state.crcImpl.equals("nativeCRC32") - || state.crcImpl.equals("nativeCRC32C")) { - NativeCRC32Wrapper.calculateChunkedSumsByteArray( - state.checksumSize, state.nativeChecksumType, state.checksumBuffer, - 0, state.data.array(), 0, state.data.capacity()); - blackhole.consume(state.checksumBuffer); - } else { - throw new RuntimeException("Batch mode not available for " - + state.crcImpl); - } - } - - public static void main(String[] args) throws Exception { - org.openjdk.jmh.Main.main(args); - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCStreaming.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCStreaming.java deleted file mode 100644 index 669d858e3d4..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCStreaming.java +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.ozone.genesis; - -import java.nio.ByteBuffer; - -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.ozone.common.ChecksumByteBuffer; -import org.apache.hadoop.ozone.common.ChecksumByteBufferFactory; -import org.apache.hadoop.ozone.common.ChecksumByteBufferImpl; -import org.apache.hadoop.ozone.common.NativeCheckSumCRC32; -import org.apache.hadoop.ozone.common.PureJavaCrc32ByteBuffer; -import org.apache.hadoop.ozone.common.PureJavaCrc32CByteBuffer; -import org.apache.hadoop.util.NativeCRC32Wrapper; -import org.apache.hadoop.util.PureJavaCrc32; -import org.apache.hadoop.util.PureJavaCrc32C; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.BenchmarkMode; -import org.openjdk.jmh.annotations.Fork; -import org.openjdk.jmh.annotations.Level; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Mode; -import org.openjdk.jmh.annotations.Param; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.Threads; -import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.infra.Blackhole; - -import java.util.zip.CRC32; - -import static java.util.concurrent.TimeUnit.MILLISECONDS; - -/** - * Class to benchmark various CRC implementations. This can be executed via - * - * ozone genesis -b BenchmarkCRC - * - * However there are some points to keep in mind. java.util.zip.CRC32C is not - * available until Java 9, therefore if the JVM has a lower version than 9, that - * implementation will not be tested. - * - * The hadoop native libraries will only be tested if libhadoop.so is found on - * the "-Djava.library.path". libhadoop.so is not currently bundled with Ozone, - * so it needs to be obtained from a Hadoop build and the test needs to be - * executed on a compatible OS (ie Linux x86): - * - * ozone --jvmargs -Djava.library.path=/home/sodonnell/native genesis -b - * BenchmarkCRC - */ -public class BenchMarkCRCStreaming { - - private static int dataSize = 64 * 1024 * 1024; - - /** - * Benchmark state. 
- */ - @State(Scope.Thread) - public static class BenchmarkState { - - private final ByteBuffer data = ByteBuffer.allocate(dataSize); - - @Param({"512", "1024", "2048", "4096", "32768", "1048576"}) - private int checksumSize; - - @Param({"pureCRC32", "pureCRC32C", "hadoopCRC32C", "hadoopCRC32", - "zipCRC32", "zipCRC32C", "nativeCRC32", "nativeCRC32C"}) - private String crcImpl; - - private ChecksumByteBuffer checksum; - - public ChecksumByteBuffer checksum() { - return checksum; - } - - public String crcImpl() { - return crcImpl; - } - - public int checksumSize() { - return checksumSize; - } - - @Setup(Level.Trial) - public void setUp() { - switch (crcImpl) { - case "pureCRC32": - checksum = new PureJavaCrc32ByteBuffer(); - break; - case "pureCRC32C": - checksum = new PureJavaCrc32CByteBuffer(); - break; - case "hadoopCRC32": - checksum = new ChecksumByteBufferImpl(new PureJavaCrc32()); - break; - case "hadoopCRC32C": - checksum = new ChecksumByteBufferImpl(new PureJavaCrc32C()); - break; - case "zipCRC32": - checksum = new ChecksumByteBufferImpl(new CRC32()); - break; - case "zipCRC32C": - try { - checksum = new ChecksumByteBufferImpl( - ChecksumByteBufferFactory.Java9Crc32CFactory.createChecksum()); - } catch (Throwable e) { - throw new RuntimeException("zipCRC32C is not available pre Java 9"); - } - break; - case "nativeCRC32": - if (NativeCRC32Wrapper.isAvailable()) { - checksum = new ChecksumByteBufferImpl(new NativeCheckSumCRC32( - NativeCRC32Wrapper.CHECKSUM_CRC32, checksumSize)); - } else { - throw new RuntimeException("Native library is not available"); - } - break; - case "nativeCRC32C": - if (NativeCRC32Wrapper.isAvailable()) { - checksum = new ChecksumByteBufferImpl(new NativeCheckSumCRC32( - NativeCRC32Wrapper.CHECKSUM_CRC32C, checksumSize)); - } else { - throw new RuntimeException("Native library is not available"); - } - break; - default: - } - data.clear(); - data.put(RandomUtils.nextBytes(data.remaining())); - } - } - - @Benchmark - @Threads(1) - @Warmup(iterations = 3, time = 1000, timeUnit = MILLISECONDS) - @Fork(value = 1, warmups = 0) - @Measurement(iterations = 5, time = 2000, timeUnit = MILLISECONDS) - @BenchmarkMode(Mode.Throughput) - public void runCRC(Blackhole blackhole, BenchmarkState state) { - ByteBuffer data = state.data; - data.clear(); - ChecksumByteBuffer csum = state.checksum; - int bytesPerCheckSum = state.checksumSize; - - for (int i=0; i ids) throws IOException { - Objects.requireNonNull(ids, "ids == null"); - Preconditions.checkArgument(ids.iterator().hasNext()); - List dns = new ArrayList<>(); - ids.forEach(dns::add); - final Pipeline pipeline = Pipeline.newBuilder() - .setState(Pipeline.PipelineState.OPEN) - .setId(PipelineID.randomId()) - .setReplicationConfig( - new StandaloneReplicationConfig(ReplicationFactor.ONE)) - .setNodes(dns) - .build(); - return pipeline; - } - - public static Pipeline createSingleNodePipeline(String containerName) - throws IOException { - return createPipeline(containerName, 1); - } - - /** - * Create a pipeline with single node replica. - * - * @return Pipeline with single node in it. 
- * @throws IOException - */ - public static Pipeline createPipeline(String containerName, int numNodes) - throws IOException { - Preconditions.checkArgument(numNodes >= 1); - final List ids = new ArrayList<>(numNodes); - for (int i = 0; i < numNodes; i++) { - ids.add(GenesisUtil.createDatanodeDetails(UUID.randomUUID())); - } - return createPipeline(containerName, ids); - } - - @Setup(Level.Trial) - public void initialize() throws IOException { - stateMap = new ContainerStateMap(); - runCount = new AtomicInteger(0); - Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString()); - Preconditions.checkNotNull(pipeline, "Pipeline cannot be null."); - int currentCount = 1; - for (int x = 1; x < 1000; x++) { - try { - ContainerInfo containerInfo = new ContainerInfo.Builder() - .setState(CLOSED) - .setPipelineID(pipeline.getId()) - .setReplicationConfig(pipeline.getReplicationConfig()) - .setUsedBytes(0) - .setNumberOfKeys(0) - .setStateEnterTime(Time.now()) - .setOwner(OzoneConsts.OZONE) - .setContainerID(x) - .setDeleteTransactionId(0) - .build(); - stateMap.addContainer(containerInfo); - currentCount++; - } catch (SCMException e) { - e.printStackTrace(); - } - } - for (int y = currentCount; y < 50000; y++) { - try { - ContainerInfo containerInfo = new ContainerInfo.Builder() - .setState(OPEN) - .setPipelineID(pipeline.getId()) - .setReplicationConfig(pipeline.getReplicationConfig()) - .setUsedBytes(0) - .setNumberOfKeys(0) - .setStateEnterTime(Time.now()) - .setOwner(OzoneConsts.OZONE) - .setContainerID(y) - .setDeleteTransactionId(0) - .build(); - stateMap.addContainer(containerInfo); - currentCount++; - } catch (SCMException e) { - e.printStackTrace(); - } - } - try { - ContainerInfo containerInfo = new ContainerInfo.Builder() - .setState(OPEN) - .setPipelineID(pipeline.getId()) - .setReplicationConfig(pipeline.getReplicationConfig()) - .setUsedBytes(0) - .setNumberOfKeys(0) - .setStateEnterTime(Time.now()) - .setOwner(OzoneConsts.OZONE) - .setContainerID(currentCount++) - .setDeleteTransactionId(0) - .build(); - stateMap.addContainer(containerInfo); - } catch (SCMException e) { - e.printStackTrace(); - } - - containerID = new AtomicInteger(currentCount++); - - } - - @Benchmark - public void createContainerBenchMark(BenchMarkContainerStateMap state, - Blackhole bh) throws IOException { - ContainerInfo containerInfo = getContainerInfo(state); - state.stateMap.addContainer(containerInfo); - } - - private ContainerInfo getContainerInfo(BenchMarkContainerStateMap state) - throws IOException { - Pipeline pipeline = createSingleNodePipeline(UUID.randomUUID().toString()); - int cid = state.containerID.incrementAndGet(); - return new ContainerInfo.Builder() - .setState(CLOSED) - .setPipelineID(pipeline.getId()) - .setReplicationConfig(pipeline.getReplicationConfig()) - .setUsedBytes(0) - .setNumberOfKeys(0) - .setStateEnterTime(Time.now()) - .setOwner(OzoneConsts.OZONE) - .setContainerID(cid) - .setDeleteTransactionId(0) - .build(); - } - - @Benchmark - public void getMatchingContainerBenchMark(BenchMarkContainerStateMap state, - Blackhole bh) throws IOException { - if(runCount.incrementAndGet() % errorFrequency == 0) { - state.stateMap.addContainer(getContainerInfo(state)); - } - bh.consume(state.stateMap - .getMatchingContainerIDs(OPEN, OzoneConsts.OZONE, - ReplicationConfig.fromProtoTypeAndFactor( - ReplicationType.STAND_ALONE, ReplicationFactor.ONE))); - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java deleted file mode 100644 index c00e27effc4..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java +++ /dev/null @@ -1,339 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.genesis; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumData; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetBlockRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; -import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; -import org.apache.hadoop.ozone.container.common.impl.ContainerSet; -import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; - -import com.google.common.collect.Maps; -import org.apache.commons.codec.digest.DigestUtils; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.ozone.container.common.volume.StorageVolume; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.Level; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.TearDown; - -/** - * Benchmarks DatanodeDispatcher class. 
- */ -@State(Scope.Benchmark) -public class BenchMarkDatanodeDispatcher { - - private String baseDir; - private String datanodeUuid; - private HddsDispatcher dispatcher; - private ByteString data; - private Random random; - private AtomicInteger containerCount; - private AtomicInteger keyCount; - private AtomicInteger chunkCount; - - private static final int INIT_CONTAINERS = 100; - private static final int INIT_KEYS = 50; - private static final int INIT_CHUNKS = 100; - public static final int CHUNK_SIZE = 1048576; - - private List containers; - private List keys; - private List chunks; - private MutableVolumeSet volumeSet; - - @Setup(Level.Trial) - public void initialize() throws IOException { - datanodeUuid = UUID.randomUUID().toString(); - - // 1 MB of data - data = ByteString.copyFromUtf8(RandomStringUtils.randomAscii(CHUNK_SIZE)); - random = new Random(); - OzoneConfiguration conf = new OzoneConfiguration(); - baseDir = System.getProperty("java.io.tmpdir") + File.separator + - datanodeUuid; - - // data directory - conf.set("dfs.datanode.data.dir", baseDir + File.separator + "data"); - - //We need 100 * container size minimum space - conf.set("ozone.scm.container.size", "10MB"); - - ContainerSet containerSet = new ContainerSet(); - volumeSet = new MutableVolumeSet(datanodeUuid, conf, null, - StorageVolume.VolumeType.DATA_VOLUME, null); - StateContext context = new StateContext( - conf, DatanodeStates.RUNNING, null); - ContainerMetrics metrics = ContainerMetrics.create(conf); - Map handlers = Maps.newHashMap(); - for (ContainerProtos.ContainerType containerType : - ContainerProtos.ContainerType.values()) { - Handler handler = Handler.getHandlerForContainerType( - containerType, conf, "datanodeid", - containerSet, volumeSet, metrics, - c -> {}); - handler.setClusterID("scm"); - handlers.put(containerType, handler); - } - dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, handlers, - context, metrics, null); - dispatcher.init(); - - containerCount = new AtomicInteger(); - keyCount = new AtomicInteger(); - chunkCount = new AtomicInteger(); - - containers = new ArrayList<>(); - keys = new ArrayList<>(); - chunks = new ArrayList<>(); - - // Create containers - for (int x = 0; x < INIT_CONTAINERS; x++) { - long containerID = HddsUtils.getTime() + x; - ContainerCommandRequestProto req = getCreateContainerCommand(containerID); - dispatcher.dispatch(req, null); - containers.add(containerID); - containerCount.getAndIncrement(); - } - - for (int x = 0; x < INIT_KEYS; x++) { - keys.add(HddsUtils.getTime()+x); - } - - for (int x = 0; x < INIT_CHUNKS; x++) { - chunks.add("chunk-" + x); - } - - // Add chunk and keys to the containers - for (int x = 0; x < INIT_KEYS; x++) { - String chunkName = chunks.get(x); - chunkCount.getAndIncrement(); - long key = keys.get(x); - keyCount.getAndIncrement(); - for (int y = 0; y < INIT_CONTAINERS; y++) { - long containerID = containers.get(y); - BlockID blockID = new BlockID(containerID, key); - dispatcher - .dispatch(getPutBlockCommand(blockID, chunkName), null); - dispatcher.dispatch(getWriteChunkCommand(blockID, chunkName), null); - } - } - } - - @TearDown(Level.Trial) - public void cleanup() throws IOException { - volumeSet.shutdown(); - FileUtils.deleteDirectory(new File(baseDir)); - } - - private ContainerCommandRequestProto getCreateContainerCommand( - long containerID) { - ContainerCommandRequestProto.Builder request = - ContainerCommandRequestProto.newBuilder(); - request.setCmdType(ContainerProtos.Type.CreateContainer); - 
request.setContainerID(containerID); - request.setCreateContainer( - ContainerProtos.CreateContainerRequestProto.getDefaultInstance()); - request.setDatanodeUuid(datanodeUuid); - request.setTraceID(containerID + "-trace"); - return request.build(); - } - - private ContainerCommandRequestProto getWriteChunkCommand( - BlockID blockID, String chunkName) { - WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto - .newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .setChunkData(getChunkInfo(blockID, chunkName)) - .setData(data); - - ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto - .newBuilder(); - request.setCmdType(ContainerProtos.Type.WriteChunk) - .setContainerID(blockID.getContainerID()) - .setTraceID(getBlockTraceID(blockID)) - .setDatanodeUuid(datanodeUuid) - .setWriteChunk(writeChunkRequest); - return request.build(); - } - - private ContainerCommandRequestProto getReadChunkCommand( - BlockID blockID, String chunkName) { - ReadChunkRequestProto.Builder readChunkRequest = ReadChunkRequestProto - .newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .setChunkData(getChunkInfo(blockID, chunkName)) - .setReadChunkVersion(ContainerProtos.ReadChunkVersion.V1); - - ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto - .newBuilder(); - request.setCmdType(ContainerProtos.Type.ReadChunk) - .setContainerID(blockID.getContainerID()) - .setTraceID(getBlockTraceID(blockID)) - .setDatanodeUuid(datanodeUuid) - .setReadChunk(readChunkRequest); - return request.build(); - } - - private ContainerProtos.ChunkInfo getChunkInfo( - BlockID blockID, String chunkName) { - ContainerProtos.ChunkInfo.Builder builder = - ChunkInfo.newBuilder() - .setChunkName( - DigestUtils.md5Hex(chunkName) - + "_stream_" + blockID.getContainerID() + "_block_" - + blockID.getLocalID()) - .setChecksumData( - ChecksumData.newBuilder() - .setBytesPerChecksum(4) - .setType(ChecksumType.CRC32) - .build()) - .setOffset(0).setLen(data.size()); - return builder.build(); - } - - private ContainerCommandRequestProto getPutBlockCommand( - BlockID blockID, String chunkKey) { - PutBlockRequestProto.Builder putBlockRequest = PutBlockRequestProto - .newBuilder() - .setBlockData(getBlockData(blockID, chunkKey)); - - ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto - .newBuilder(); - request.setCmdType(ContainerProtos.Type.PutBlock) - .setContainerID(blockID.getContainerID()) - .setTraceID(getBlockTraceID(blockID)) - .setDatanodeUuid(datanodeUuid) - .setPutBlock(putBlockRequest); - return request.build(); - } - - private ContainerCommandRequestProto getGetBlockCommand(BlockID blockID) { - GetBlockRequestProto.Builder readBlockRequest = - GetBlockRequestProto.newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()); - ContainerCommandRequestProto.Builder request = ContainerCommandRequestProto - .newBuilder() - .setCmdType(ContainerProtos.Type.GetBlock) - .setContainerID(blockID.getContainerID()) - .setTraceID(getBlockTraceID(blockID)) - .setDatanodeUuid(datanodeUuid) - .setGetBlock(readBlockRequest); - return request.build(); - } - - private ContainerProtos.BlockData getBlockData( - BlockID blockID, String chunkKey) { - ContainerProtos.BlockData.Builder builder = ContainerProtos.BlockData - .newBuilder() - .setBlockID(blockID.getDatanodeBlockIDProtobuf()) - .addChunks(getChunkInfo(blockID, chunkKey)); - return builder.build(); - } - - @Benchmark - public void createContainer(BenchMarkDatanodeDispatcher bmdd) { - 
long containerID = RandomUtils.nextLong(); - ContainerCommandRequestProto req = getCreateContainerCommand(containerID); - bmdd.dispatcher.dispatch(req, null); - bmdd.containers.add(containerID); - bmdd.containerCount.getAndIncrement(); - } - - @Benchmark - public void writeChunk(BenchMarkDatanodeDispatcher bmdd) { - bmdd.dispatcher.dispatch(getWriteChunkCommand( - getRandomBlockID(), getNewChunkToWrite()), null); - } - - @Benchmark - public void readChunk(BenchMarkDatanodeDispatcher bmdd) { - BlockID blockID = getRandomBlockID(); - String chunkKey = getRandomChunkToRead(); - bmdd.dispatcher.dispatch(getReadChunkCommand(blockID, chunkKey), null); - } - - @Benchmark - public void putBlock(BenchMarkDatanodeDispatcher bmdd) { - BlockID blockID = getRandomBlockID(); - String chunkKey = getNewChunkToWrite(); - bmdd.dispatcher.dispatch(getPutBlockCommand(blockID, chunkKey), null); - } - - @Benchmark - public void getBlock(BenchMarkDatanodeDispatcher bmdd) { - BlockID blockID = getRandomBlockID(); - bmdd.dispatcher.dispatch(getGetBlockCommand(blockID), null); - } - - // Chunks writes from benchmark only reaches certain containers - // Use INIT_CHUNKS instead of updated counters to guarantee - // key/chunks are readable. - - private BlockID getRandomBlockID() { - return new BlockID(getRandomContainerID(), getRandomKeyID()); - } - - private long getRandomContainerID() { - return containers.get(random.nextInt(INIT_CONTAINERS)); - } - - private long getRandomKeyID() { - return keys.get(random.nextInt(INIT_KEYS)); - } - - private String getRandomChunkToRead() { - return chunks.get(random.nextInt(INIT_CHUNKS)); - } - - private String getNewChunkToWrite() { - return "chunk-" + chunkCount.getAndIncrement(); - } - - private String getBlockTraceID(BlockID blockID) { - return blockID.getContainerID() + "-" + blockID.getLocalID() +"-trace"; - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java deleted file mode 100644 index 641239291cb..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkOzoneManager.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- * - */ - -package org.apache.hadoop.ozone.genesis; - -import java.io.File; -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.locks.ReentrantLock; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; -import org.apache.hadoop.hdds.scm.server.SCMConfigurator; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.security.UserGroupInformation; - -import org.apache.commons.lang3.RandomStringUtils; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.Level; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.TearDown; -import org.openjdk.jmh.annotations.Threads; -import org.openjdk.jmh.infra.Blackhole; - -/** - * Benchmarks OzoneManager. 
- */ -@State(Scope.Thread) -public class BenchMarkOzoneManager { - - private static String testDir; - private static OzoneManager om; - private static StorageContainerManager scm; - private static ReentrantLock lock = new ReentrantLock(); - private static String volumeName = UUID.randomUUID().toString(); - private static String bucketName = UUID.randomUUID().toString(); - private static List keyNames = new ArrayList<>(); - private static List clientIDs = new ArrayList<>(); - - private static int numPipelines = 1; - private static int numContainersPerPipeline = 3; - - @Setup(Level.Trial) - public static void initialize() - throws Exception { - try { - lock.lock(); - if (scm == null) { - OzoneConfiguration conf = new OzoneConfiguration(); - testDir = GenesisUtil.getTempPath() - .resolve(RandomStringUtils.randomNumeric(7)).toString(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir); - - GenesisUtil.configureSCM(conf, 10); - GenesisUtil.configureOM(conf, 20); - conf.setInt(OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, - numContainersPerPipeline); - GenesisUtil.addPipelines(ReplicationFactor.THREE, numPipelines, conf); - - scm = GenesisUtil.getScm(conf, new SCMConfigurator()); - scm.start(); - om = GenesisUtil.getOm(conf); - om.start(); - - // prepare SCM - PipelineManager pipelineManager = scm.getPipelineManager(); - for (Pipeline pipeline : pipelineManager - .getPipelines( - new RatisReplicationConfig(ReplicationFactor.THREE))) { - pipelineManager.openPipeline(pipeline.getId()); - } - scm.getEventQueue().fireEvent(SCMEvents.SAFE_MODE_STATUS, - new SCMSafeModeManager.SafeModeStatus(false, false)); - Thread.sleep(1000); - - // prepare OM - om.createVolume(new OmVolumeArgs.Builder().setVolume(volumeName) - .setAdminName(UserGroupInformation.getLoginUser().getUserName()) - .setOwnerName(UserGroupInformation.getLoginUser().getUserName()) - .build()); - om.createBucket(new OmBucketInfo.Builder().setBucketName(bucketName) - .setVolumeName(volumeName).build()); - createKeys(100000); - } - } finally { - lock.unlock(); - } - } - - private static void createKeys(int numKeys) throws IOException { - for (int i = 0; i < numKeys; i++) { - String key = UUID.randomUUID().toString(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(key) - .setDataSize(0) - .setReplicationConfig( - new RatisReplicationConfig(ReplicationFactor.THREE)) - .build(); - OpenKeySession keySession = om.getKeyManager().openKey(omKeyArgs); - long clientID = keySession.getId(); - keyNames.add(key); - clientIDs.add(clientID); - } - } - - @TearDown(Level.Trial) - public static void tearDown() { - try { - lock.lock(); - if (scm != null) { - scm.stop(); - scm.join(); - scm = null; - om.stop(); - om.join(); - om = null; - FileUtil.fullyDelete(new File(testDir)); - } - } finally { - lock.unlock(); - } - } - - @Threads(4) - @Benchmark - public void allocateBlockBenchMark(BenchMarkOzoneManager state, - Blackhole bh) throws IOException { - int index = (int) (Math.random() * keyNames.size()); - String key = keyNames.get(index); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(key) - .setDataSize(50) - .setReplicationConfig( - new RatisReplicationConfig(ReplicationFactor.THREE)) - .build(); - state.om.allocateBlock(omKeyArgs, clientIDs.get(index), new ExcludeList()); - } - - @Threads(4) - @Benchmark - public void createAndCommitKeyBenchMark(BenchMarkOzoneManager state, - Blackhole bh) throws IOException { 
- String key = UUID.randomUUID().toString(); - OmKeyArgs omKeyArgs = new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(key) - .setDataSize(50) - .setReplicationConfig( - new RatisReplicationConfig(ReplicationFactor.THREE)) - .build(); - OpenKeySession openKeySession = state.om.openKey(omKeyArgs); - state.om.allocateBlock(omKeyArgs, openKeySession.getId(), - new ExcludeList()); - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java deleted file mode 100644 index a7e8f82f92c..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkSCM.java +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.ozone.genesis; - -import java.io.File; -import java.io.IOException; -import java.util.concurrent.locks.ReentrantLock; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.scm.block.BlockManager; -import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; -import org.apache.hadoop.hdds.scm.server.SCMConfigurator; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; - -import org.apache.commons.lang3.RandomStringUtils; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.Level; -import org.openjdk.jmh.annotations.Param; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.TearDown; -import org.openjdk.jmh.annotations.Threads; -import org.openjdk.jmh.infra.Blackhole; - -/** - * Benchmarks BlockManager class. 
- */ -@State(Scope.Thread) -public class BenchMarkSCM { - - private static String testDir; - private static StorageContainerManager scm; - private static BlockManager blockManager; - private static ReentrantLock lock = new ReentrantLock(); - - @Param({ "1", "10", "100", "1000", "10000", "100000" }) - private static int numPipelines; - @Param({ "3", "10", "100" }) - private static int numContainersPerPipeline; - - @Setup(Level.Trial) - public static void initialize() - throws Exception { - try { - lock.lock(); - if (scm == null) { - OzoneConfiguration conf = new OzoneConfiguration(); - testDir = GenesisUtil.getTempPath() - .resolve(RandomStringUtils.randomNumeric(7)).toString(); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir); - - GenesisUtil.configureSCM(conf, 10); - conf.setInt(OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, - numContainersPerPipeline); - GenesisUtil.addPipelines(ReplicationFactor.THREE, numPipelines, conf); - - scm = GenesisUtil.getScm(conf, new SCMConfigurator()); - scm.start(); - blockManager = scm.getScmBlockManager(); - - // prepare SCM - PipelineManager pipelineManager = scm.getPipelineManager(); - for (Pipeline pipeline : pipelineManager - .getPipelines( - new RatisReplicationConfig(ReplicationFactor.THREE))) { - pipelineManager.openPipeline(pipeline.getId()); - } - scm.getEventQueue().fireEvent(SCMEvents.SAFE_MODE_STATUS, - new SCMSafeModeManager.SafeModeStatus(false, false)); - Thread.sleep(1000); - } - } finally { - lock.unlock(); - } - } - - @TearDown(Level.Trial) - public static void tearDown() { - try { - lock.lock(); - if (scm != null) { - scm.stop(); - scm.join(); - scm = null; - FileUtil.fullyDelete(new File(testDir)); - } - } finally { - lock.unlock(); - } - } - - @Threads(4) - @Benchmark - public void allocateBlockBenchMark(BenchMarkSCM state, - Blackhole bh) throws IOException { - BenchMarkSCM.blockManager - .allocateBlock(50, new RatisReplicationConfig(ReplicationFactor.THREE), - "Genesis", new ExcludeList()); - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchmarkBlockDataToString.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchmarkBlockDataToString.java deleted file mode 100644 index ecb10dbd022..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchmarkBlockDataToString.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.genesis; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.builder.ToStringBuilder; -import org.apache.commons.lang3.builder.ToStringStyle; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.client.ContainerBlockID; -import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.Param; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.infra.Blackhole; - -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ThreadLocalRandom; - -/** - * Benchmarks various implementations of {@link BlockData#toString}. - */ -@State(Scope.Benchmark) -public class BenchmarkBlockDataToString { - - @Param("1000") - private int count; - - @Param({"112"}) - private int capacity; - - private List data; - private List values; - - @Setup - public void createData() { - ThreadLocalRandom rnd = ThreadLocalRandom.current(); - data = new ArrayList<>(count); - values = new ArrayList<>(count); - for (int i = 0; i < count; i++) { - BlockID blockID = new BlockID(rnd.nextLong(), rnd.nextLong()); - BlockData item = new BlockData(blockID); - item.setBlockCommitSequenceId(rnd.nextLong()); - data.add(item); - values.add(item.toString()); - } - } - - @Benchmark - public void usingToStringBuilderDefaultCapacity( - BenchmarkBlockDataToString state, Blackhole sink) { - for (int i = 0; i < state.count; i++) { - BlockData item = state.data.get(i); - String str = new ToStringBuilder(item, ToStringStyle.NO_CLASS_NAME_STYLE) - .append("blockId", item.getBlockID().toString()) - .append("size", item.getSize()) - .toString(); - sink.consume(str); - Preconditions.checkArgument(str.equals(state.values.get(i))); - } - } - - @Benchmark - public void usingToStringBuilder( - BenchmarkBlockDataToString state, Blackhole sink) { - for (int i = 0; i < state.count; i++) { - BlockData item = state.data.get(i); - String str = new ToStringBuilder(item, ToStringStyle.NO_CLASS_NAME_STYLE, - new StringBuffer(capacity)) - .append("blockId", item.getBlockID().toString()) - .append("size", item.getSize()) - .toString(); - sink.consume(str); - Preconditions.checkArgument(str.equals(state.values.get(i))); - } - } - - @Benchmark - public void usingSimpleStringBuilder( - BenchmarkBlockDataToString state, Blackhole sink) { - for (int i = 0; i < state.count; i++) { - BlockData item = state.data.get(i); - String str = new StringBuilder(capacity) - .append("[") - .append("blockId=") - .append(item.getBlockID()) - .append(",size=") - .append(item.getSize()) - .append("]") - .toString(); - sink.consume(str); - Preconditions.checkArgument(str.equals(state.values.get(i))); - } - } - - @Benchmark - public void usingPushDownStringBuilder( - BenchmarkBlockDataToString state, Blackhole sink) { - for (int i = 0; i < state.count; i++) { - BlockData item = state.data.get(i); - StringBuilder sb = new StringBuilder(capacity); - item.appendTo(sb); - String str = sb.toString(); - sink.consume(str); - 
Preconditions.checkArgument(str.equals(state.values.get(i))); - } - } - - @Benchmark - public void usingConcatenation( - BenchmarkBlockDataToString state, Blackhole sink) { - for (int i = 0; i < state.count; i++) { - BlockData item = state.data.get(i); - String str = "[blockId=" + - item.getBlockID() + - ",size=" + - item.getSize() + - "]"; - sink.consume(str); - Preconditions.checkArgument(str.equals(state.values.get(i))); - } - } - - @Benchmark - public void usingInlineStringBuilder( - BenchmarkBlockDataToString state, Blackhole sink) { - for (int i = 0; i < state.count; i++) { - BlockData item = state.data.get(i); - BlockID blockID = item.getBlockID(); - ContainerBlockID containerBlockID = blockID.getContainerBlockID(); - String str = new StringBuilder(capacity) - .append("[") - .append("blockId=") - .append("conID: ") - .append(containerBlockID.getContainerID()) - .append(" locID: ") - .append(containerBlockID.getLocalID()) - .append(" bcsId: ") - .append(blockID.getBlockCommitSequenceId()) - .append(",size=") - .append(item.getSize()) - .append("]") - .toString(); - sink.consume(str); - Preconditions.checkArgument(str.equals(state.values.get(i))); - } - } - -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchmarkChunkManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchmarkChunkManager.java deleted file mode 100644 index c3299e395f8..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchmarkChunkManager.java +++ /dev/null @@ -1,180 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.genesis; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.file.Files; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.common.ChunkBuffer; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; -import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; -import org.apache.hadoop.ozone.container.common.volume.HddsVolume; -import org.apache.hadoop.ozone.container.common.volume.ImmutableVolumeSet; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy; -import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy; -import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; - -import static java.nio.charset.StandardCharsets.UTF_8; -import org.apache.commons.io.FileUtils; -import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK; -import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK; -import org.openjdk.jmh.annotations.Benchmark; -import org.openjdk.jmh.annotations.Level; -import org.openjdk.jmh.annotations.Measurement; -import org.openjdk.jmh.annotations.Param; -import org.openjdk.jmh.annotations.Scope; -import org.openjdk.jmh.annotations.Setup; -import org.openjdk.jmh.annotations.State; -import org.openjdk.jmh.annotations.TearDown; -import org.openjdk.jmh.annotations.Warmup; -import org.openjdk.jmh.infra.Blackhole; - -/** - * Benchmark for ChunkManager implementations. - */ -@Warmup(time = 1, timeUnit = TimeUnit.SECONDS) -@Measurement(time = 1, timeUnit = TimeUnit.SECONDS) -public class BenchmarkChunkManager { - - private static final String DEFAULT_TEST_DATA_DIR = - "target" + File.separator + "test" + File.separator + "data"; - - private static final AtomicLong CONTAINER_COUNTER = new AtomicLong(); - - private static final DispatcherContext WRITE_STAGE = - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA) - .build(); - - private static final DispatcherContext COMMIT_STAGE = - new DispatcherContext.Builder() - .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA) - .build(); - - private static final long CONTAINER_SIZE = OzoneConsts.GB; - private static final long BLOCK_SIZE = 256 * OzoneConsts.MB; - - private static final String SCM_ID = UUID.randomUUID().toString(); - private static final String DATANODE_ID = UUID.randomUUID().toString(); - - /** - * State for the benchmark. 
- */ - @State(Scope.Benchmark) - public static class BenchmarkState { - - @Param({"1048576", "4194304", "16777216", "67108864"}) - private int chunkSize; - - private File dir; - private ChunkBuffer buffer; - private VolumeSet volumeSet; - private OzoneConfiguration config; - - private static File getTestDir() throws IOException { - File dir = new File(DEFAULT_TEST_DATA_DIR).getAbsoluteFile(); - Files.createDirectories(dir.toPath()); - return dir; - } - - @Setup(Level.Iteration) - public void setup() throws IOException { - dir = getTestDir(); - config = new OzoneConfiguration(); - HddsVolume volume = new HddsVolume.Builder(dir.getAbsolutePath()) - .conf(config) - .datanodeUuid(DATANODE_ID) - .build(); - - volumeSet = new ImmutableVolumeSet(volume); - - byte[] arr = randomAlphanumeric(chunkSize).getBytes(UTF_8); - buffer = ChunkBuffer.wrap(ByteBuffer.wrap(arr)); - } - - @TearDown(Level.Iteration) - public void cleanup() { - FileUtils.deleteQuietly(dir); - } - } - - @Benchmark - public void writeMultipleFiles(BenchmarkState state, Blackhole sink) - throws StorageContainerException { - - ChunkManager chunkManager = new FilePerChunkStrategy(true, null, null); - benchmark(chunkManager, FILE_PER_CHUNK, state, sink); - } - - @Benchmark - public void writeSingleFile(BenchmarkState state, Blackhole sink) - throws StorageContainerException { - - ChunkManager chunkManager = new FilePerBlockStrategy(true, null, null); - benchmark(chunkManager, FILE_PER_BLOCK, state, sink); - } - - private void benchmark(ChunkManager subject, ChunkLayOutVersion layout, - BenchmarkState state, Blackhole sink) - throws StorageContainerException { - - final long containerID = CONTAINER_COUNTER.getAndIncrement(); - - KeyValueContainerData containerData = - new KeyValueContainerData(containerID, layout, - CONTAINER_SIZE, UUID.randomUUID().toString(), - DATANODE_ID); - KeyValueContainer container = - new KeyValueContainer(containerData, state.config); - container.create(state.volumeSet, (volumes, any) -> volumes.get(0), SCM_ID); - - final long blockCount = CONTAINER_SIZE / BLOCK_SIZE; - final long chunkCount = BLOCK_SIZE / state.chunkSize; - - for (long b = 0; b < blockCount; b++) { - final BlockID blockID = new BlockID(containerID, b); - - for (long c = 0; c < chunkCount; c++) { - final String chunkName = String.format("block.%d.chunk.%d", b, c); - final long offset = c * state.chunkSize; - ChunkInfo chunkInfo = new ChunkInfo(chunkName, offset, state.chunkSize); - ChunkBuffer data = state.buffer.duplicate(0, state.chunkSize); - - subject.writeChunk(container, blockID, chunkInfo, data, WRITE_STAGE); - subject.writeChunk(container, blockID, chunkInfo, data, COMMIT_STAGE); - - sink.consume(chunkInfo); - } - } - } - -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java deleted file mode 100644 index 605cc427b99..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/Genesis.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.ozone.genesis; - -import org.openjdk.jmh.profile.GCProfiler; -import org.openjdk.jmh.profile.StackProfiler; -import org.openjdk.jmh.runner.Runner; -import org.openjdk.jmh.runner.RunnerException; -import org.openjdk.jmh.runner.options.OptionsBuilder; -import picocli.CommandLine; -import picocli.CommandLine.Option; -import picocli.CommandLine.Command; - -import static org.openjdk.jmh.runner.options.TimeValue.seconds; - -/** - * Main class that executes a set of HDDS/Ozone benchmarks. - * We purposefully don't use the runner and tools classes from Hadoop. - * There are some name collisions with OpenJDK JMH package. - *
- * Hence, these classes do not use the Tool/Runner pattern of standard Hadoop - * CLI. - */ -@Command(name = "ozone genesis", - description = "Tool for running ozone benchmarks", - mixinStandardHelpOptions = true) -public final class Genesis { - - // After adding benchmark in genesis package add the benchmark name in the - // description for this option. - @Option(names = {"-b", "-benchmark", "--benchmark"}, - split = ",", description = - "Option used for specifying benchmarks to run.\n" - + "Ex. ozone genesis -benchmark BenchMarkContainerStateMap," - + "BenchMarkOMKeyAllocation.\n" - + "Possible benchmarks which can be used are " - + "{BenchMarkContainerStateMap, " - + "BenchMarkOzoneManager, BenchMarkOMClient, " - + "BenchMarkSCM, BenchMarkMetadataStoreReads, " - + "BenchMarkMetadataStoreWrites, BenchMarkDatanodeDispatcher, " - + "BenchMarkRocksDbStore, BenchMarkCRCStreaming, BenchMarkCRCBatch}") - private static String[] benchmarks; - - @Option(names = "-t", defaultValue = "4", - description = "Number of threads to use for the benchmark.\n" - + "This option can be overridden by threads mentioned in benchmark.") - private static int numThreads; - - @Option(names = "--seconds", - description = "Number of seconds to run each benchmark method.\n" - + "By default no limit is set.") - private static int seconds = -1; - - private Genesis() { - } - - public static void main(String[] args) throws RunnerException { - CommandLine commandLine = new CommandLine(new Genesis()); - commandLine.parse(args); - if (commandLine.isUsageHelpRequested()) { - commandLine.usage(System.out); - return; - } - - OptionsBuilder optionsBuilder = new OptionsBuilder(); - if (benchmarks != null) { - // The OptionsBuilder#include takes a regular expression as argument. - // Therefore it is important to keep the benchmark names unique for - // running a benchmark. For example if there are two benchmarks - - // BenchMarkOM and BenchMarkOMClient and we include BenchMarkOM then - // both the benchmarks will be run. - for (String benchmark : benchmarks) { - optionsBuilder.include(benchmark); - } - } - optionsBuilder.warmupIterations(2) - .measurementIterations(20) - .addProfiler(StackProfiler.class) - .addProfiler(GCProfiler.class) - .shouldDoGC(true) - .forks(1) - .threads(numThreads); - - if (seconds > 0) { - optionsBuilder.measurementTime(seconds(seconds)); - } - - new Runner(optionsBuilder.build()).run(); - } -} - - diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java deleted file mode 100644 index 8ba19fc1747..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisMemoryProfiler.java +++ /dev/null @@ -1,61 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -package org.apache.hadoop.ozone.genesis; - -import org.apache.hadoop.conf.StorageUnit; -import org.openjdk.jmh.infra.BenchmarkParams; -import org.openjdk.jmh.infra.IterationParams; -import org.openjdk.jmh.profile.InternalProfiler; -import org.openjdk.jmh.results.AggregationPolicy; -import org.openjdk.jmh.results.IterationResult; -import org.openjdk.jmh.results.Result; -import org.openjdk.jmh.results.ScalarResult; - -import java.util.ArrayList; -import java.util.Collection; - -/** - * Max memory profiler. - */ -public class GenesisMemoryProfiler implements InternalProfiler { - @Override - public void beforeIteration(BenchmarkParams benchmarkParams, - IterationParams iterationParams) { - - } - - @Override - public Collection afterIteration(BenchmarkParams - benchmarkParams, IterationParams iterationParams, IterationResult - result) { - long totalHeap = Runtime.getRuntime().totalMemory(); - - Collection samples = new ArrayList<>(); - samples.add(new ScalarResult("Max heap", - StorageUnit.BYTES.toGBs(totalHeap), "GBs", - AggregationPolicy.MAX)); - return samples; - } - - @Override - public String getDescription() { - return "Genesis Memory Profiler. Computes Max Memory used by a test."; - } -} - diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java deleted file mode 100644 index cffb4c4daee..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java +++ /dev/null @@ -1,162 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.genesis; - -import java.io.IOException; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.UUID; - -import org.apache.hadoop.hdds.client.RatisReplicationConfig; -import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; -import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.server.SCMConfigurator; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.apache.hadoop.ozone.om.OMStorage; -import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.security.authentication.client.AuthenticationException; - -/** - * Utility class for benchmark test cases. - */ -public final class GenesisUtil { - - private GenesisUtil() { - // private constructor. - } - - public static final String DEFAULT_TYPE = "default"; - public static final String CACHE_10MB_TYPE = "Cache10MB"; - public static final String CACHE_1GB_TYPE = "Cache1GB"; - public static final String CLOSED_TYPE = "ClosedContainer"; - - private static final int DB_FILE_LEN = 7; - private static final String TMP_DIR = "java.io.tmpdir"; - private static final Random RANDOM = new Random(); - private static final String RANDOM_LOCAL_ADDRESS = "127.0.0.1:0"; - - public static Path getTempPath() { - return Paths.get(System.getProperty(TMP_DIR)); - } - - public static DatanodeDetails createDatanodeDetails(UUID uuid) { - String ipAddress = - RANDOM.nextInt(256) + "." + RANDOM.nextInt(256) + "." + RANDOM - .nextInt(256) + "." 
+ RANDOM.nextInt(256); - - DatanodeDetails.Port containerPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.STANDALONE, 0); - DatanodeDetails.Port ratisPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.RATIS, 0); - DatanodeDetails.Port restPort = DatanodeDetails.newPort( - DatanodeDetails.Port.Name.REST, 0); - DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); - builder.setUuid(uuid) - .setHostName("localhost") - .setIpAddress(ipAddress) - .addPort(containerPort) - .addPort(ratisPort) - .addPort(restPort); - return builder.build(); - } - - static StorageContainerManager getScm(OzoneConfiguration conf, - SCMConfigurator configurator) throws IOException, - AuthenticationException { - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - if(scmStore.getState() != Storage.StorageState.INITIALIZED) { - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - } - return StorageContainerManager.createSCM(conf, configurator); - } - - static void configureSCM(OzoneConfiguration conf, int numHandlers) { - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, - RANDOM_LOCAL_ADDRESS); - conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, - RANDOM_LOCAL_ADDRESS); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, - RANDOM_LOCAL_ADDRESS); - conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, - RANDOM_LOCAL_ADDRESS); - conf.setInt(ScmConfigKeys.OZONE_SCM_HANDLER_COUNT_KEY, numHandlers); - } - - static void addPipelines(HddsProtos.ReplicationFactor factor, - int numPipelines, ConfigurationSource conf) throws Exception { - SCMMetadataStore scmMetadataStore = - new SCMMetadataStoreImpl((OzoneConfiguration)conf); - - Table pipelineTable = - scmMetadataStore.getPipelineTable(); - List nodes = new ArrayList<>(); - for (int i = 0; i < factor.getNumber(); i++) { - nodes - .add(GenesisUtil.createDatanodeDetails(UUID.randomUUID())); - } - for (int i = 0; i < numPipelines; i++) { - Pipeline pipeline = - Pipeline.newBuilder() - .setState(Pipeline.PipelineState.OPEN) - .setId(PipelineID.randomId()) - .setReplicationConfig(new RatisReplicationConfig(factor)) - .setNodes(nodes) - .build(); - pipelineTable.put(pipeline.getId(), - pipeline); - } - scmMetadataStore.getStore().close(); - } - - static OzoneManager getOm(OzoneConfiguration conf) - throws IOException, AuthenticationException { - OMStorage omStorage = new OMStorage(conf); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - if (omStorage.getState() != Storage.StorageState.INITIALIZED) { - omStorage.setClusterId(scmStore.getClusterID()); - omStorage.setOmId(UUID.randomUUID().toString()); - omStorage.initialize(); - } - return OzoneManager.createOm(conf); - } - - static void configureOM(OzoneConfiguration conf, int numHandlers) { - conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, - RANDOM_LOCAL_ADDRESS); - conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numHandlers); - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/package-info.java deleted file mode 100644 index a7c8ee26486..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/package-info.java +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. 
See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - * - */ - -/** - * Zephyr contains a set of benchmarks for Ozone. This is a command line tool - * that can be run by end users to get a sense of what kind of performance - * the system is capable of; Since Ozone is a new system, these benchmarks - * will allow us to correlate a base line to real world performance. - */ -package org.apache.hadoop.ozone.genesis; \ No newline at end of file diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java index 740e667c4d6..af6d624ed74 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java @@ -151,7 +151,7 @@ public OzoneClient createClient(MutableConfigurationSource conf) client = createRpcClientFromHostPort(ozoneURI.getHost(), ozoneURI.getPort(), conf); } - } else {// When host is not specified + } else { // When host is not specified Collection omServiceIds = conf.getTrimmedStringCollection( OZONE_OM_SERVICE_IDS_KEY); @@ -270,7 +270,7 @@ private static URI stringToUri(String pathString) { // add leading slash to the path, if it does not exist int firstSlash = path.indexOf('/'); - if(firstSlash != 0) { + if (firstSlash != 0) { path = "/" + path; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java index cfb37636cf4..2de229081eb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java @@ -48,7 +48,8 @@ RemoveAclBucketHandler.class, GetAclBucketHandler.class, SetAclBucketHandler.class, - ClearQuotaHandler.class + ClearQuotaHandler.class, + UpdateBucketHandler.class }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java index 5c076621819..e1592e521a8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java @@ -57,7 +57,7 @@ public class CreateBucketHandler extends BucketHandler { " user if not specified") private String ownerName; - enum AllowedBucketLayouts {FILE_SYSTEM_OPTIMIZED, OBJECT_STORE} + enum AllowedBucketLayouts { FILE_SYSTEM_OPTIMIZED, OBJECT_STORE } @Option(names = { "--layout", "-l" }, description = "Allowed Bucket Layouts: ${COMPLETION-CANDIDATES}", diff 
--git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java new file mode 100644 index 00000000000..7ba62a5ce1c --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.shell.bucket; + +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.client.OzoneClientException; +import org.apache.hadoop.ozone.shell.OzoneAddress; + +import picocli.CommandLine.Command; +import picocli.CommandLine.Option; + +import java.io.IOException; + +/** + * Executes update bucket calls. + */ +@Command(name = "update", + description = "Updates the parameters of the bucket") +public class UpdateBucketHandler extends BucketHandler { + + @Option(names = {"--user", "-u"}, + description = "Owner of the bucket to set") + private String ownerName; + + @Override + protected void execute(OzoneClient client, OzoneAddress address) + throws IOException, OzoneClientException { + + String volumeName = address.getVolumeName(); + String bucketName = address.getBucketName(); + OzoneBucket bucket = client.getObjectStore().getVolume(volumeName) + .getBucket(bucketName); + + if (ownerName != null && !ownerName.isEmpty()) { + boolean result = bucket.setOwner(ownerName); + if (LOG.isDebugEnabled() && !result) { + out().format("Bucket '%s' owner is already '%s'. 
Unchanged.%n", + volumeName + "/" + bucketName, ownerName); + } + } + + OzoneBucket updatedBucket = client.getObjectStore().getVolume(volumeName) + .getBucket(bucketName); + printObjectAsJson(updatedBucket); + } +} diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java index 93a421a2ed0..fa83bbbc41a 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java @@ -186,7 +186,7 @@ public void testQueryCommand() { @Test public void testLoadCommand() { String[] args1 = new String[]{dbName, "load", LOGS1}; - try{ + try { execute(args1, ""); fail("No exception thrown."); } catch (Exception e) { diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java index 2a5223f4ff0..75648b44197 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java @@ -54,12 +54,12 @@ public static void init() throws UnsupportedEncodingException { } @After - public void setUp(){ + public void setUp() { bout.reset(); } @AfterClass - public static void tearDown(){ + public static void tearDown() { System.setOut(psBackup); } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java index 2486e5786dc..b378628da4a 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java @@ -144,10 +144,10 @@ public List handleExecutionException(ExecutionException ex, throw ex; } }; - try{ + try { cmd.parseWithHandlers(new CommandLine.RunLast(), exceptionHandler, args); - }catch(Exception ex){ + } catch (Exception ex) { Assert.assertTrue("Expected " + msg + ", but got: " + ex.getMessage(), ex.getMessage().contains(msg)); } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java index ac1f7fd61e5..ef0cac7f8b3 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java @@ -37,7 +37,7 @@ public OzoneTestDriver(ProgramDriver pgd) { try { pgd.addClass("freon", Freon.class, "Populates ozone with data."); - } catch(Throwable e) { + } catch (Throwable e) { e.printStackTrace(); } } @@ -46,7 +46,7 @@ public void run(String[] args) { int exitCode = -1; try { exitCode = pgd.run(args); - } catch(Throwable e) { + } catch (Throwable e) { e.printStackTrace(); } @@ -55,7 +55,7 @@ public void run(String[] args) { } } - public static void main(String[] args){ + public static void main(String[] args) { new OzoneTestDriver().run(args); } } diff --git a/pom.xml b/pom.xml index 8cc35195bca..11a0ad1ee95 100644 --- a/pom.xml +++ b/pom.xml @@ -139,7 +139,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1.6.0 0.33.0 - 1.19 
2.5.0 @@ -670,18 +669,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs <version>${hadoop.version}</version> - - <groupId>org.openjdk.jmh</groupId> - <artifactId>jmh-core</artifactId> - <version>${jmh.version}</version> - - - <groupId>org.openjdk.jmh</groupId> - <artifactId>jmh-generator-annprocess</artifactId> - <version>${jmh.version}</version> - - - <groupId>org.apache.hadoop</groupId> <artifactId>hadoop-kms</artifactId>