+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds;
+
+/**
+ * Various reusable utility methods related to Java.
+ */
+public final class JavaUtils {
+ // "1.8"->8, "9"->9, "10"->10
+ private static final int JAVA_SPEC_VER = Math.max(8, Integer.parseInt(
+ System.getProperty("java.specification.version").split("\\.")[0]));
+
+ /**
+ * Query to see if major version of Java specification of the system
+ * is equal or greater than the parameter.
+ *
+ * @param version 8, 9, 10 etc.
+ * @return comparison with system property, always true for 8
+ */
+ public static boolean isJavaVersionAtLeast(int version) {
+ return JAVA_SPEC_VER >= version;
+ }
+
+ /**
+ * Private constructor.
+ */
+ private JavaUtils() {
+ }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
index c63896e9e1d1..11a82187bf7e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java
@@ -24,8 +24,8 @@
* The replication type to be used while writing key into ozone.
*/
public enum ReplicationType {
- RATIS,
- STAND_ALONE,
+ RATIS,
+ STAND_ALONE,
CHAINED;
public static ReplicationType fromProto(
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index b98c1160e3b0..3791483024a2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -24,6 +24,7 @@
import java.util.Set;
import java.util.UUID;
+import com.fasterxml.jackson.annotation.JsonIgnore;
import com.google.common.collect.ImmutableSet;
import org.apache.hadoop.hdds.DatanodeVersions;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
@@ -369,6 +370,7 @@ public static DatanodeDetails getFromProtoBuf(
* Returns a DatanodeDetails protobuf message from a datanode ID.
* @return HddsProtos.DatanodeDetailsProto
*/
+ @JsonIgnore
public HddsProtos.DatanodeDetailsProto getProtoBufMessage() {
return toProto(CURRENT_VERSION);
}
@@ -431,6 +433,7 @@ public HddsProtos.DatanodeDetailsProto.Builder toProtoBuilder(
* Returns a ExtendedDatanodeDetails protobuf message from a datanode ID.
* @return HddsProtos.ExtendedDatanodeDetailsProto
*/
+ @JsonIgnore
public HddsProtos.ExtendedDatanodeDetailsProto getExtendedProtoBufMessage() {
HddsProtos.ExtendedDatanodeDetailsProto.Builder extendedBuilder =
HddsProtos.ExtendedDatanodeDetailsProto.newBuilder()
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index e5958b7b9046..d2a1330469af 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -246,6 +246,11 @@ public final class ScmConfigKeys {
public static final int OZONE_SCM_HEARTBEAT_RPC_RETRY_COUNT_DEFAULT =
15;
+ public static final String OZONE_SCM_HEARTBEAT_RPC_RETRY_INTERVAL =
+ "ozone.scm.heartbeat.rpc-retry-interval";
+ public static final String OZONE_SCM_HEARTBEAT_RPC_RETRY_INTERVAL_DEFAULT =
+ "1s";
+
/**
* Defines how frequently we will log the missing of heartbeat to a specific
* SCM. In the default case we will write a warning message for each 10
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index bab99b4d0868..5a7e8096f2ba 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -288,6 +288,17 @@ StatusAndMessages finalizeScmUpgrade(String upgradeClientID)
throws IOException;
StatusAndMessages queryUpgradeFinalizationProgress(String upgradeClientID,
- boolean force)
- throws IOException;
+ boolean force) throws IOException;
+
+ /**
+ * Get usage information of datanode by ipaddress or uuid.
+ *
+ * @param ipaddress datanode ipaddress String
+ * @param uuid datanode uuid String
+ * @return List of DatanodeUsageInfo. Each element contains info such as
+ * capacity, SCMused, and remaining space.
+ * @throws IOException
+ */
+  List<HddsProtos.DatanodeUsageInfoProto> getDatanodeUsageInfo(String ipaddress,
+      String uuid) throws IOException;
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index b8f1a926f186..0485c0329d43 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -199,6 +199,7 @@ public void updateLastUsedTime() {
lastUsed = Instant.ofEpochMilli(Time.now());
}
+ @JsonIgnore
public HddsProtos.ContainerInfoProto getProtobuf() {
HddsProtos.ContainerInfoProto.Builder builder =
HddsProtos.ContainerInfoProto.newBuilder();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java
index ed7d215e6554..0c2167c3398f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineID.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdds.scm.pipeline;
+import com.fasterxml.jackson.annotation.JsonIgnore;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import java.util.UUID;
@@ -45,6 +46,7 @@ public UUID getId() {
return id;
}
+ @JsonIgnore
public HddsProtos.PipelineID getProtobuf() {
HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder()
.setMostSigBits(id.getMostSignificantBits())
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 2cb7fe195b43..46cf170f9650 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -18,12 +18,13 @@
package org.apache.hadoop.hdds.scm.protocol;
import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.ScmConfig;
import org.apache.hadoop.hdds.scm.ScmInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.security.KerberosInfo;
import java.io.Closeable;
import java.io.IOException;
@@ -31,7 +32,6 @@
import java.util.Map;
import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages;
-import org.apache.hadoop.security.KerberosInfo;
/**
* ContainerLocationProtocol is used by an HDFS node to find the set of nodes
@@ -246,6 +246,17 @@ StatusAndMessages finalizeScmUpgrade(String upgradeClientID)
throws IOException;
StatusAndMessages queryUpgradeFinalizationProgress(String upgradeClientID,
- boolean force)
- throws IOException;
+ boolean force) throws IOException;
+
+ /**
+ * Get Datanode usage information by ip or uuid.
+ *
+ * @param ipaddress - datanode IP address String
+ * @param uuid - datanode UUID String
+ * @return List of DatanodeUsageInfo. Each element contains info such as
+ * capacity, SCMused, and remaining space.
+ * @throws IOException
+ */
+  List<HddsProtos.DatanodeUsageInfoProto> getDatanodeUsageInfo(String ipaddress,
+      String uuid) throws IOException;
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index fdbb3a027dbb..5ac27f5e4c35 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -231,6 +231,8 @@ public enum Units {TB, GB, MB, KB, B}
// instance gets stored.
public static final String OM_CONTEXT_ATTRIBUTE = "ozone.om";
+ public static final String SCM_CONTEXT_ATTRIBUTE = "ozone.scm";
+
private OzoneConsts() {
// Never Constructed
}
@@ -398,5 +400,7 @@ private OzoneConsts() {
public static final String OM_RATIS_SNAPSHOT_DIR = "snapshot";
public static final long DEFAULT_OM_UPDATE_ID = -1L;
+ // CRL Sequence Id
+ public static final String CRL_SEQUENCE_ID_KEY = "CRL_SEQUENCE_ID";
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
index d86f7b1c40c1..db7a31eea950 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
@@ -75,8 +75,10 @@ private static Function newChecksumByteBufferFunction(
/** The algorithms for {@link ChecksumType}. */
enum Algorithm {
NONE(() -> data -> ByteString.EMPTY),
- CRC32(() -> newChecksumByteBufferFunction(PureJavaCrc32ByteBuffer::new)),
- CRC32C(() -> newChecksumByteBufferFunction(PureJavaCrc32CByteBuffer::new)),
+ CRC32(() ->
+ newChecksumByteBufferFunction(ChecksumByteBufferFactory::crc32Impl)),
+ CRC32C(() ->
+ newChecksumByteBufferFunction(ChecksumByteBufferFactory::crc32CImpl)),
SHA256(() -> newMessageDigestFunction("SHA-256")),
MD5(() -> newMessageDigestFunction("MD5"));
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferFactory.java
new file mode 100644
index 000000000000..a41af84189e7
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferFactory.java
@@ -0,0 +1,93 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
+
+import org.apache.hadoop.hdds.JavaUtils;
+import org.apache.hadoop.util.PureJavaCrc32C;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.lang.invoke.MethodHandle;
+import java.lang.invoke.MethodHandles;
+import java.lang.invoke.MethodType;
+import java.util.zip.CRC32;
+import java.util.zip.Checksum;
+
+/**
+ * Class containing factories for creating various checksum impls.
+ */
+public final class ChecksumByteBufferFactory {
+
+ private static final Logger LOG =
+      LoggerFactory.getLogger(ChecksumByteBufferFactory.class);
+
+ private static volatile boolean useJava9Crc32C
+ = JavaUtils.isJavaVersionAtLeast(9);
+
+ public static class Java9Crc32CFactory {
+ private static final MethodHandle NEW_CRC32C_MH;
+
+ static {
+ MethodHandle newCRC32C = null;
+ try {
+ newCRC32C = MethodHandles.publicLookup()
+ .findConstructor(
+ Class.forName("java.util.zip.CRC32C"),
+ MethodType.methodType(void.class)
+ );
+ } catch (ReflectiveOperationException e) {
+ // Should not reach here.
+ throw new RuntimeException(e);
+ }
+ NEW_CRC32C_MH = newCRC32C;
+ }
+
+ public static java.util.zip.Checksum createChecksum() {
+ try {
+ // Should throw nothing
+ return (Checksum) NEW_CRC32C_MH.invoke();
+ } catch (Throwable t) {
+ throw (t instanceof RuntimeException) ? (RuntimeException) t
+ : new RuntimeException(t);
+ }
+ }
+  }
+
+ public static ChecksumByteBuffer crc32Impl() {
+ return new ChecksumByteBufferImpl(new CRC32());
+ }
+
+ public static ChecksumByteBuffer crc32CImpl() {
+ if (useJava9Crc32C) {
+ try {
+ return new ChecksumByteBufferImpl(Java9Crc32CFactory.createChecksum());
+ } catch (Throwable e) {
+ // should not happen
+ LOG.error("CRC32C creation failed, switching to PureJavaCrc32C", e);
+ useJava9Crc32C = false;
+ }
+ }
+ return new ChecksumByteBufferImpl(new PureJavaCrc32C());
+ }
+
+ /**
+ * Private Constructor.
+ */
+ private ChecksumByteBufferFactory() {
+ }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java
new file mode 100644
index 000000000000..db779b6bc36c
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumByteBufferImpl.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
+
+import java.nio.ByteBuffer;
+import java.util.zip.Checksum;
+
+public class ChecksumByteBufferImpl implements ChecksumByteBuffer {
+
+ private Checksum checksum;
+
+ public ChecksumByteBufferImpl(Checksum impl) {
+ this.checksum = impl;
+ }
+
+ @Override
+ // TODO - when we eventually move to a minimum Java version >= 9 this method
+ // should be refactored to simply call checksum.update(buffer), as the
+ // Checksum interface has been enhanced to allow this since Java 9.
+ public void update(ByteBuffer buffer) {
+ if (buffer.hasArray()) {
+ checksum.update(buffer.array(), buffer.position() + buffer.arrayOffset(),
+ buffer.remaining());
+ } else {
+ byte[] b = new byte[buffer.remaining()];
+ buffer.get(b);
+ checksum.update(b, 0, b.length);
+ }
+ }
+
+ @Override
+ public void update(byte[] b, int off, int len) {
+ checksum.update(b, off, len);
+ }
+
+ @Override
+ public void update(int i) {
+ checksum.update(i);
+ }
+
+ @Override
+ public long getValue() {
+ return checksum.getValue();
+ }
+
+ @Override
+ public void reset() {
+ checksum.reset();
+ }
+
+}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java
new file mode 100644
index 000000000000..81061acbacf0
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/NativeCheckSumCRC32.java
@@ -0,0 +1,85 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
+
+import org.apache.commons.lang3.NotImplementedException;
+import org.apache.hadoop.util.NativeCRC32Wrapper;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This is a partial implementation to be used only in benchmarks.
+ *
+ * The Hadoop Native checksum libraries do not allow for updating a checksum
+ * as the java.util.zip.Checksum dictates in its update(...) method.
+ *
+ * This class allows the Native Hadoop CRC32 implementations to be called to
+ * generate checksums, provided only a single call is made to the update(...)
+ * method.
+ *
+ */
+public class NativeCheckSumCRC32 implements java.util.zip.Checksum {
+
+ // 1 for crc32, 2 for crc32c - see NativeCRC32Wrapper
+ private int checksumType;
+ private int bytesPerSum;
+
+ private ByteBuffer checksum = ByteBuffer.allocate(4);
+ private boolean needsReset = false;
+
+ public NativeCheckSumCRC32(int checksumType, int bytesPerSum) {
+ this.checksumType = checksumType;
+ this.bytesPerSum = bytesPerSum;
+ }
+
+ @Override
+ public void update(int b) {
+ throw new NotImplementedException("Update method is not implemented");
+ }
+
+ /**
+ * Calculate the checksum. Note the checksum is not updatable. You should
+   * make a single call to this method and then call getValue() to retrieve the
+ * value.
+ * @param b A byte array whose contents will be used to calculate a CRC32(C)
+ * @param off The offset in the byte array to start reading.
+ * @param len The number of bytes in the byte array to read.
+ */
+ @Override
+ public void update(byte[] b, int off, int len) {
+ if (needsReset) {
+      throw new IllegalStateException(
+          "This checksum implementation is not updatable");
+ }
+ NativeCRC32Wrapper.calculateChunkedSumsByteArray(bytesPerSum, checksumType,
+ checksum.array(), 0, b, off, len);
+ needsReset = true;
+ }
+
+ @Override
+ public long getValue() {
+ checksum.position(0);
+ return checksum.getInt();
+ }
+
+ @Override
+ public void reset() {
+ checksum.clear();
+ needsReset = false;
+ }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java
index 0d1f6307501a..001a6454a634 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32ByteBuffer.java
@@ -21,7 +21,8 @@
* Similar to {@link org.apache.hadoop.util.PureJavaCrc32}
* except that this class implement {@link ChecksumByteBuffer}.
*/
-final class PureJavaCrc32ByteBuffer extends ChecksumByteBuffer.CrcIntTable {
+public final class PureJavaCrc32ByteBuffer extends
+ ChecksumByteBuffer.CrcIntTable {
@Override
int[] getTable() {
return T;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java
index 1c443575f817..c101c30d289b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/PureJavaCrc32CByteBuffer.java
@@ -24,7 +24,8 @@
* Similar to {@link org.apache.hadoop.util.PureJavaCrc32C}
* except that this class implement {@link ChecksumByteBuffer}.
*/
-final class PureJavaCrc32CByteBuffer extends ChecksumByteBuffer.CrcIntTable {
+public final class PureJavaCrc32CByteBuffer extends
+ ChecksumByteBuffer.CrcIntTable {
@Override
int[] getTable() {
return T;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java
new file mode 100644
index 000000000000..d88cdbb16fdb
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/NativeCRC32Wrapper.java
@@ -0,0 +1,72 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import org.apache.hadoop.fs.ChecksumException;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This class wraps the NativeCRC32 class in hadoop-common, because the class
+ * is package private there. The intention of making this class available
+ * in Ozone is to allow the native libraries to be benchmarked alongside other
+ * implementations. At the current time, the hadoop native CRC is not used
+ * anywhere in Ozone except for benchmarks.
+ */
+public final class NativeCRC32Wrapper {
+
+ public static final int CHECKSUM_CRC32 = NativeCrc32.CHECKSUM_CRC32;
+ public static final int CHECKSUM_CRC32C = NativeCrc32.CHECKSUM_CRC32C;
+
+ // Private constructor
+ private NativeCRC32Wrapper() {
+ }
+
+ public static boolean isAvailable() {
+ return NativeCrc32.isAvailable();
+ }
+
+ public static void verifyChunkedSums(int bytesPerSum, int checksumType,
+ ByteBuffer sums, ByteBuffer data, String fileName, long basePos)
+ throws ChecksumException {
+ NativeCrc32.verifyChunkedSums(bytesPerSum, checksumType, sums, data,
+ fileName, basePos);
+ }
+
+ @SuppressWarnings("checkstyle:parameternumber")
+ public static void verifyChunkedSumsByteArray(int bytesPerSum,
+ int checksumType, byte[] sums, int sumsOffset, byte[] data,
+ int dataOffset, int dataLength, String fileName, long basePos)
+ throws ChecksumException {
+ NativeCrc32.verifyChunkedSumsByteArray(bytesPerSum, checksumType, sums,
+ sumsOffset, data, dataOffset, dataLength, fileName, basePos);
+ }
+
+ public static void calculateChunkedSums(int bytesPerSum, int checksumType,
+ ByteBuffer sums, ByteBuffer data) {
+ NativeCrc32.calculateChunkedSums(bytesPerSum, checksumType, sums, data);
+ }
+
+ public static void calculateChunkedSumsByteArray(int bytesPerSum,
+ int checksumType, byte[] sums, int sumsOffset, byte[] data,
+ int dataOffset, int dataLength) {
+ NativeCrc32.calculateChunkedSumsByteArray(bytesPerSum, checksumType, sums,
+ sumsOffset, data, dataOffset, dataLength);
+ }
+
+}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/package-info.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/package-info.java
new file mode 100644
index 000000000000..3d1aba5d9dd4
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/util/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains class that wrap private classes in hadoop-common
+ * util.
+ */
+package org.apache.hadoop.util;
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 9311937ce629..1dece14fc471 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -969,7 +969,17 @@
OZONE, MANAGEMENT
Retry count for the RPC from Datanode to SCM. The rpc-retry-interval
- is 1s. Make sure rpc-retry-count * (rpc-timeout + rpc-retry-interval)
+ is 1s by default. Make sure rpc-retry-count * (rpc-timeout +
+ rpc-retry-interval) is less than hdds.heartbeat.interval.
+
+
+
+ ozone.scm.heartbeat.rpc-retry-interval
+ 1s
+ OZONE, MANAGEMENT
+
+ Retry interval for the RPC from Datanode to SCM.
+ Make sure rpc-retry-count * (rpc-timeout + rpc-retry-interval)
is less than hdds.heartbeat.interval.
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
index 2f466377b4b2..4aac0b4847e9 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumByteBuffer.java
@@ -33,14 +33,14 @@ public class TestChecksumByteBuffer {
@Test
public void testPureJavaCrc32ByteBuffer() {
final Checksum expected = new PureJavaCrc32();
- final ChecksumByteBuffer testee = new PureJavaCrc32ByteBuffer();
+ final ChecksumByteBuffer testee = ChecksumByteBufferFactory.crc32Impl();
new VerifyChecksumByteBuffer(expected, testee).testCorrectness();
}
@Test
public void testPureJavaCrc32CByteBuffer() {
final Checksum expected = new PureJavaCrc32C();
- final ChecksumByteBuffer testee = new PureJavaCrc32CByteBuffer();
+ final ChecksumByteBuffer testee = ChecksumByteBufferFactory.crc32CImpl();
new VerifyChecksumByteBuffer(expected, testee).testCorrectness();
}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java
new file mode 100644
index 000000000000..a098a26f9d87
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksumImplsComputeSameValues.java
@@ -0,0 +1,99 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.common;
+
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.util.NativeCRC32Wrapper;
+import org.apache.hadoop.util.PureJavaCrc32;
+import org.apache.hadoop.util.PureJavaCrc32C;
+import org.junit.Test;
+
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.zip.CRC32;
+
+import static junit.framework.TestCase.assertEquals;
+
+public class TestChecksumImplsComputeSameValues {
+
+ private int dataSize = 1024 * 1024 * 64;
+ private ByteBuffer data = ByteBuffer.allocate(dataSize);
+ private int[] bytesPerChecksum = {512, 1024, 2048, 4096, 32768, 1048576};
+
+ @Test
+ public void testCRC32ImplsMatch() {
+ data.clear();
+ data.put(RandomUtils.nextBytes(data.remaining()));
+ for (int bpc : bytesPerChecksum) {
+      List<ChecksumByteBuffer> impls = new ArrayList<>();
+ impls.add(new PureJavaCrc32ByteBuffer());
+ impls.add(new ChecksumByteBufferImpl(new PureJavaCrc32()));
+ impls.add(new ChecksumByteBufferImpl(new CRC32()));
+ if (NativeCRC32Wrapper.isAvailable()) {
+ impls.add(new ChecksumByteBufferImpl(new NativeCheckSumCRC32(1, bpc)));
+ }
+ assertEquals(true, validateImpls(data, impls, bpc));
+ }
+ }
+
+ @Test
+ public void testCRC32CImplsMatch() {
+ data.clear();
+ data.put(RandomUtils.nextBytes(data.remaining()));
+ for (int bpc : bytesPerChecksum) {
+      List<ChecksumByteBuffer> impls = new ArrayList<>();
+ impls.add(new PureJavaCrc32CByteBuffer());
+ impls.add(new ChecksumByteBufferImpl(new PureJavaCrc32C()));
+ try {
+ impls.add(new ChecksumByteBufferImpl(
+ ChecksumByteBufferFactory.Java9Crc32CFactory.createChecksum()));
+ } catch (Throwable e) {
+ // NOOP
+ }
+ // impls.add(new ChecksumByteBufferImpl(new CRC32C())));
+ if (NativeCRC32Wrapper.isAvailable()) {
+ impls.add(new ChecksumByteBufferImpl(new NativeCheckSumCRC32(2, bpc)));
+ }
+ assertEquals(true, validateImpls(data, impls, bpc));
+ }
+ }
+
+  private boolean validateImpls(ByteBuffer buf, List<ChecksumByteBuffer> impls,
+      int bpc) {
+ for (int i = 0; i < buf.capacity(); i += bpc) {
+ buf.position(i);
+ buf.limit(i + bpc);
+ impls.get(0).update(buf);
+ int res = (int) impls.get(0).getValue();
+ impls.get(0).reset();
+ for (int j = 1; j < impls.size(); j++) {
+ ChecksumByteBuffer csum = impls.get(j);
+ buf.position(i);
+ buf.limit(i + bpc);
+ csum.update(buf);
+ if ((int) csum.getValue() != res) {
+ return false;
+ }
+ csum.reset();
+ }
+ }
+ return true;
+ }
+
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index 4834a68f1ef7..310e9da117f4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -160,8 +160,7 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails,
ContainerReplicator replicator =
new DownloadAndImportReplicator(container.getContainerSet(),
container.getController(),
- new SimpleContainerDownloader(conf,
- dnCertClient != null ? dnCertClient.getCACertificate() : null),
+ new SimpleContainerDownloader(conf, dnCertClient),
new TarContainerPacker());
replicatorMetrics = new MeasuredReplicator(replicator);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
index c7dd9c65ecf5..ffca599f8024 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
@@ -47,6 +47,7 @@
import static java.util.Collections.unmodifiableList;
import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmRpcTimeOutInMilliseconds;
import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmRpcRetryCount;
+import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmRpcRetryInterval;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -151,8 +152,8 @@ public void addSCMServer(InetSocketAddress address) throws IOException {
RetryPolicy retryPolicy =
RetryPolicies.retryUpToMaximumCountWithFixedSleep(
- getScmRpcRetryCount(conf),
- 1000, TimeUnit.MILLISECONDS);
+ getScmRpcRetryCount(conf), getScmRpcRetryInterval(conf),
+ TimeUnit.MILLISECONDS);
StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy(
StorageContainerDatanodeProtocolPB.class, version,
@@ -196,8 +197,8 @@ public void addReconServer(InetSocketAddress address) throws IOException {
RetryPolicy retryPolicy =
RetryPolicies.retryUpToMaximumCountWithFixedSleep(
- getScmRpcRetryCount(conf),
- 1000, TimeUnit.MILLISECONDS);
+ getScmRpcRetryCount(conf), getScmRpcRetryInterval(conf),
+ TimeUnit.MILLISECONDS);
ReconDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy(
ReconDatanodeProtocolPB.class, version,
address, UserGroupInformation.getCurrentUser(), hadoopConfig,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 77260e2f8b93..cef1c5c2f23e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -318,6 +318,23 @@ List getIncrementalReports(
return reportsToReturn;
}
+ List getNonIncrementalReports() {
+ List nonIncrementalReports = new LinkedList<>();
+ GeneratedMessage report = containerReports.get();
+ if (report != null) {
+ nonIncrementalReports.add(report);
+ }
+ report = nodeReport.get();
+ if (report != null) {
+ nonIncrementalReports.add(report);
+ }
+ report = pipelineReports.get();
+ if (report != null) {
+ nonIncrementalReports.add(report);
+ }
+ return nonIncrementalReports;
+ }
+
/**
* Returns available reports from the report queue with a max limit on
* list size, or empty list if the queue is empty.
@@ -326,21 +343,17 @@ List getIncrementalReports(
*/
public List getReports(InetSocketAddress endpoint,
int maxLimit) {
- List reportsToReturn =
- getIncrementalReports(endpoint, maxLimit);
- GeneratedMessage report = containerReports.get();
- if (report != null) {
- reportsToReturn.add(report);
+ if (maxLimit < 0) {
+ throw new IllegalArgumentException("Illegal maxLimit value: " + maxLimit);
}
- report = nodeReport.get();
- if (report != null) {
- reportsToReturn.add(report);
+ List reports = getNonIncrementalReports();
+ if (maxLimit <= reports.size()) {
+ return reports.subList(0, maxLimit);
+ } else {
+ reports.addAll(getIncrementalReports(endpoint,
+ maxLimit - reports.size()));
+ return reports;
}
- report = pipelineReports.get();
- if (report != null) {
- reportsToReturn.add(report);
- }
- return reportsToReturn;
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
index 53dac9dcf387..271ccffe2360 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcReplicationClient.java
@@ -18,14 +18,12 @@
package org.apache.hadoop.ozone.container.replication;
-import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
-import java.security.cert.X509Certificate;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.TimeUnit;
@@ -34,6 +32,7 @@
import org.apache.hadoop.hdds.protocol.datanode.proto.IntraDatanodeProtocolServiceGrpc;
import org.apache.hadoop.hdds.protocol.datanode.proto.IntraDatanodeProtocolServiceGrpc.IntraDatanodeProtocolServiceStub;
import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
import org.apache.hadoop.ozone.OzoneConsts;
import com.google.common.base.Preconditions;
@@ -62,7 +61,7 @@ public class GrpcReplicationClient implements AutoCloseable{
public GrpcReplicationClient(
String host, int port, Path workingDir,
- SecurityConfig secConfig, X509Certificate caCert
+ SecurityConfig secConfig, CertificateClient certClient
) throws IOException {
NettyChannelBuilder channelBuilder =
NettyChannelBuilder.forAddress(host, port)
@@ -73,14 +72,13 @@ public GrpcReplicationClient(
channelBuilder.useTransportSecurity();
SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient();
- if (caCert != null) {
- sslContextBuilder.trustManager(caCert);
+ if (certClient != null) {
+ sslContextBuilder
+ .trustManager(certClient.getCACertificate())
+ .clientAuth(ClientAuth.REQUIRE)
+ .keyManager(certClient.getPrivateKey(),
+ certClient.getCertificate());
}
-
- sslContextBuilder.clientAuth(ClientAuth.REQUIRE);
- sslContextBuilder.keyManager(
- new File(secConfig.getCertificateFileName()),
- new File(secConfig.getPrivateKeyFileName()));
if (secConfig.useTestCert()) {
channelBuilder.overrideAuthority("localhost");
}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
index 8de34ccd7689..c2415ba188a1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
@@ -21,7 +21,6 @@
import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;
-import java.security.cert.X509Certificate;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
@@ -31,6 +30,7 @@
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name;
import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import com.google.common.annotations.VisibleForTesting;
@@ -51,11 +51,11 @@ public class SimpleContainerDownloader implements ContainerDownloader {
private final Path workingDirectory;
private final SecurityConfig securityConfig;
- private final X509Certificate caCert;
+ private final CertificateClient certClient;
public SimpleContainerDownloader(
ConfigurationSource conf,
- X509Certificate caCert
+ CertificateClient certClient
) {
String workDirString =
@@ -68,7 +68,7 @@ public SimpleContainerDownloader(
workingDirectory = Paths.get(workDirString);
}
securityConfig = new SecurityConfig(conf);
- this.caCert = caCert;
+ this.certClient = certClient;
}
@Override
@@ -133,7 +133,7 @@ protected CompletableFuture downloadContainer(
GrpcReplicationClient grpcReplicationClient =
new GrpcReplicationClient(datanode.getIpAddress(),
datanode.getPort(Name.REPLICATION).getValue(),
- workingDirectory, securityConfig, caCert);
+ workingDirectory, securityConfig, certClient);
result = grpcReplicationClient.download(containerId)
.thenApply(r -> {
try {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
index 67f50738b450..586d171fff50 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
@@ -565,4 +565,40 @@ public DatanodeStates await(long time, TimeUnit timeUnit) {
assertEquals(1, awaited.get());
assertEquals(1, executed.get());
}
+
+ @Test
+ public void testGetReports() {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ DatanodeStateMachine datanodeStateMachineMock =
+ mock(DatanodeStateMachine.class);
+
+ StateContext ctx = new StateContext(conf, DatanodeStates.getInitState(),
+ datanodeStateMachineMock);
+ InetSocketAddress scm1 = new InetSocketAddress("scm1", 9001);
+ ctx.addEndpoint(scm1);
+ InetSocketAddress scm2 = new InetSocketAddress("scm2", 9001);
+ ctx.addEndpoint(scm2);
+ // Check initial state
+ assertEquals(0, ctx.getAllAvailableReports(scm1).size());
+ assertEquals(0, ctx.getAllAvailableReports(scm2).size());
+
+ Map expectedReportCount = new HashMap<>();
+
+ // Add a bunch of ContainerReports
+ batchAddReports(ctx, StateContext.CONTAINER_REPORTS_PROTO_NAME, 128);
+ batchAddReports(ctx, StateContext.NODE_REPORT_PROTO_NAME, 128);
+ batchAddReports(ctx, StateContext.PIPELINE_REPORTS_PROTO_NAME, 128);
+ batchAddReports(ctx,
+ StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME, 128);
+
+ // Should only keep the latest one
+ expectedReportCount.put(StateContext.CONTAINER_REPORTS_PROTO_NAME, 1);
+ expectedReportCount.put(StateContext.NODE_REPORT_PROTO_NAME, 1);
+ expectedReportCount.put(StateContext.PIPELINE_REPORTS_PROTO_NAME, 1);
+ // Should keep less or equal than maxLimit depending on other reports' size.
+ expectedReportCount.put(
+ StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME, 97);
+ checkReportCount(ctx.getReports(scm1, 100), expectedReportCount);
+ checkReportCount(ctx.getReports(scm2, 100), expectedReportCount);
+ }
}
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/recipe/BotoClient.md b/hadoop-hdds/docs/content/recipe/BotoClient.md
new file mode 100644
index 000000000000..d8dc02d4bc51
--- /dev/null
+++ b/hadoop-hdds/docs/content/recipe/BotoClient.md
@@ -0,0 +1,189 @@
+---
+title: Access Ozone object store with Amazon Boto3 client
+linktitle: Ozone with Boto3 Client
+summary: How to access Ozone object store with Boto3 client?
+---
+
+
+This recipe shows how the Ozone object store can be accessed from the Boto3 client. The following APIs were verified:
+
+ - Create bucket
+ - List bucket
+ - Head bucket
+ - Delete bucket
+ - Upload file
+ - Download file
+ - Delete objects(keys)
+ - Head object
+ - Multipart upload
+
+
+## Requirements
+
+You will need a recent version of Python 3 for your Boto3 client, as the Boto3 installation requirements indicate here:
+https://boto3.amazonaws.com/v1/documentation/api/latest/index.html
+
+## Obtain resource to Ozone
+You may refer to the Amazon Boto3 documentation regarding the creation of 's3' resources here:
+https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html
+
+ s3 = boto3.resource('s3',
+ endpoint_url='http://localhost:9878',
+ aws_access_key_id='testuser/scm@EXAMPLE.COM',
+ aws_secret_access_key='c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999'
+ )
+ 'endpoint_url' is pointing to Ozone s3 endpoint.
+
+
+## Obtain client to Ozone via session
+You may refer to the Amazon Boto3 documentation regarding sessions here:
+https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
+
+ Create a session
+ session = boto3.session.Session()
+
+ Obtain s3 client to Ozone via session:
+
+ s3_client = session.client(
+ service_name='s3',
+ aws_access_key_id='testuser/scm@EXAMPLE.COM',
+ aws_secret_access_key='c261b6ecabf7d37d5f9ded654b1c724adac9bd9f13e247a235e567e8296d2999',
+ endpoint_url='http://localhost:9878',
+ )
+ 'endpoint_url' is pointing to Ozone s3 endpoint.
+
+ In our code sample below, we're demonstrating the usage of both s3 and s3_client.
+
+There are multiple ways to configure Boto3 client credentials if you're connecting to a secured cluster. In these cases,
+the above lines of passing 'aws_access_key_id' and 'aws_secret_access_key' when creating Ozone s3 client shall be skipped.
+
+Please refer to Boto3 documentation for details at here:
+https://boto3.amazonaws.com/v1/documentation/api/latest/guide/credentials.html
+
+
+### Create a bucket
+ response = s3_client.create_bucket(Bucket='bucket1')
+ print(response)
+
+This will create a bucket 'bucket1' in Ozone volume 's3v'.
+
+### List buckets
+ response = s3_client.list_buckets()
+ print('Existing buckets:')
+ for bucket in response['Buckets']:
+ print(f' {bucket["Name"]}')
+
+This will list all buckets in Ozone volume 's3v'.
+
+### Head a bucket
+ response = s3_client.head_bucket(Bucket='bucket1')
+ print(response)
+
+This will head bucket 'bucket1' in Ozone volume 's3v'.
+
+### Delete a bucket
+ response = s3_client.delete_bucket(Bucket='bucket1')
+ print(response)
+
+This will delete the bucket 'bucket1' from Ozone volume 's3v'.
+
+### Upload a file
+ response = s3.Bucket('bucket1').upload_file('./README.md','README.md')
+ print(response)
+
+This will upload 'README.md' to Ozone and create a key 'README.md' in bucket 'bucket1' of volume 's3v'.
+
+### Download a file
+ response = s3.Bucket('bucket1').download_file('README.md', 'download.md')
+ print(response)
+
+This will download 'README.md' from Ozone volume 's3v' to the local filesystem and create a file named 'download.md'.
+
+### Head an object
+ response = s3_client.head_object(Bucket='bucket1', Key='README.md')
+ print(response)
+
+This will head object 'README.md' from Ozone volume 's3v' in the bucket 'bucket1'.
+
+### Delete Objects
+ response = s3_client.delete_objects(
+ Bucket='bucket1',
+ Delete={
+ 'Objects': [
+ {
+ 'Key': 'README4.md',
+ },
+ {
+ 'Key': 'README3.md',
+ },
+ ],
+ 'Quiet': False,
+ },
+ )
+
+This will delete objects 'README3.md' and 'README4.md' from Ozone volume 's3v' in bucket 'bucket1'.
+
+### Multipart upload
+ response = s3_client.create_multipart_upload(Bucket='bucket1', Key='key1')
+ print(response)
+ uid=response['UploadId']
+ print(uid)
+
+ response = s3_client.upload_part_copy(
+ Bucket='bucket1',
+ CopySource='/bucket1/maven.gz',
+ Key='key1',
+ PartNumber=1,
+ UploadId=str(uid)
+ )
+ print(response)
+ etag1=response.get('CopyPartResult').get('ETag')
+ print(etag1)
+
+ response = s3_client.upload_part_copy(
+ Bucket='bucket1',
+ CopySource='/bucket1/maven1.gz',
+ Key='key1',
+ PartNumber=2,
+ UploadId=str(uid)
+ )
+ print(response)
+ etag2=response.get('CopyPartResult').get('ETag')
+ print(etag2)
+
+ response = s3_client.complete_multipart_upload(
+ Bucket='bucket1',
+ Key='key1',
+ MultipartUpload={
+ 'Parts': [
+ {
+ 'ETag': str(etag1),
+ 'PartNumber': 1,
+ },
+ {
+ 'ETag': str(etag2),
+ 'PartNumber': 2,
+ },
+ ],
+ },
+ UploadId=str(uid),
+ )
+ print(response)
+
+This will use 'maven.gz' and 'maven1.gz' as the copy sources from Ozone volume 's3v' to create a new object 'key1'
+in Ozone volume 's3v'. Please note that the 'ETag's are required and important for the call.
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 6aaa373be954..a2930c870ee6 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -16,14 +16,9 @@
*/
package org.apache.hadoop.hdds.scm.protocolPB;
-import java.io.Closeable;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.function.Consumer;
-
+import com.google.common.base.Preconditions;
+import com.google.protobuf.RpcController;
+import com.google.protobuf.ServiceException;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -41,12 +36,15 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ClosePipelineRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ContainerResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DatanodeUsageInfoRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DatanodeUsageInfoResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DeactivatePipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionNodesRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineBatchRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto;
@@ -54,22 +52,21 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.NodeQueryResponseProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMCloseContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.PipelineResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.RecommissionNodesRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ReplicationManagerStatusResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMCloseContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMDeleteContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.SCMListContainerResponseProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationRequest.Builder;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ScmContainerLocationResponse;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartReplicationManagerRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StopReplicationManagerRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.StartMaintenanceNodesRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.DecommissionNodesRequestProto;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.RecommissionNodesRequestProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.Type;
import org.apache.hadoop.hdds.scm.ScmInfo;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -83,9 +80,13 @@
import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer;
import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages;
-import com.google.common.base.Preconditions;
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Consumer;
import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
@@ -617,6 +618,32 @@ public boolean getReplicationManagerStatus() throws IOException {
}
+ /**
+ * Builds request for datanode usage information and receives response.
+ *
+ * @param ipaddress - Address String
+ * @param uuid - UUID String
+ * @return List of DatanodeUsageInfo. Each element contains info such as
+ * capacity, SCMUsed, and remaining space.
+ * @throws IOException
+ */
+ @Override
+ public List getDatanodeUsageInfo(
+ String ipaddress, String uuid) throws IOException {
+
+ DatanodeUsageInfoRequestProto request =
+ DatanodeUsageInfoRequestProto.newBuilder()
+ .setIpaddress(ipaddress)
+ .setUuid(uuid)
+ .build();
+
+ DatanodeUsageInfoResponseProto response =
+ submitRequest(Type.DatanodeUsageInfo,
+ builder -> builder.setDatanodeUsageInfoRequest(request))
+ .getDatanodeUsageInfoResponse();
+ return response.getInfoList();
+ }
+
@Override
public StatusAndMessages finalizeScmUpgrade(String upgradeClientID)
throws IOException {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CRLApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CRLApprover.java
new file mode 100644
index 000000000000..8632743dd069
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CRLApprover.java
@@ -0,0 +1,43 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.security.x509.certificate.authority;
+
+import org.bouncycastle.cert.X509v2CRLBuilder;
+import org.bouncycastle.operator.OperatorCreationException;
+
+import java.security.cert.CRLException;
+import java.security.cert.X509CRL;
+
+/**
+ * CRL Approver interface is used to sign CRLs.
+ */
+public interface CRLApprover {
+
+ /**
+ * Signs a CRL.
+ * @param builder - CRL builder instance with CRL info to be signed.
+ * @return Signed CRL.
+ * @throws CRLException - On Error
+ * @throws OperatorCreationException - on Error.
+ */
+ X509CRL sign(X509v2CRLBuilder builder)
+ throws CRLException, OperatorCreationException;
+
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java
index 76512c54d190..f0443d86ec7d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateServer.java
@@ -23,13 +23,17 @@
import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
import org.apache.hadoop.hdds.security.x509.SecurityConfig;
import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateApprover.ApprovalType;
+import org.bouncycastle.asn1.x509.CRLReason;
import org.bouncycastle.cert.X509CertificateHolder;
import org.bouncycastle.pkcs.PKCS10CertificationRequest;
import java.io.IOException;
+import java.math.BigInteger;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
+import java.util.Date;
import java.util.List;
+import java.util.Optional;
import java.util.concurrent.Future;
/**
@@ -46,7 +50,7 @@ public interface CertificateServer {
* @throws SCMSecurityException - Throws if the init fails.
*/
void init(SecurityConfig securityConfig, CAType type)
- throws SCMSecurityException;
+ throws IOException;
/**
* Returns the CA Certificate for this CA.
@@ -101,26 +105,25 @@ Future requestCertificate(String csr,
/**
* Revokes a Certificate issued by this CertificateServer.
*
- * @param certificate - Certificate to revoke
- * @param approver - Approval process to follow.
- * @return Future that tells us what happened.
- * @throws SCMSecurityException - on Error.
- */
- Future revokeCertificate(X509Certificate certificate,
- ApprovalType approver) throws SCMSecurityException;
-
- /**
- * TODO : CRL, OCSP etc. Later. This is the start of a CertificateServer
- * framework.
+ * @param serialIDs - List of serial IDs of Certificates to be revoked.
+ * @param reason - Reason for revocation.
+ * @param securityConfig - Security Configuration.
+ * @param revocationTime - Revocation time for the certificates.
+ * @return Future that gives a list of certificates that were revoked.
*/
+ Future> revokeCertificates(
+ List serialIDs,
+ CRLReason reason,
+ Date revocationTime,
+ SecurityConfig securityConfig);
/**
* List certificates.
* @param type - node type: OM/SCM/DN
* @param startSerialId - start certificate serial id
* @param count - max number of certificates returned in a batch
- * @return
- * @throws IOException
+ * @return List of X509 Certificates.
+ * @throws IOException - On Failure
*/
List listCertificate(HddsProtos.NodeType type,
long startSerialId, int count, boolean isRevoked) throws IOException;
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java
index 3ddb6400d79e..8a265c65241e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/CertificateStore.java
@@ -20,11 +20,15 @@
package org.apache.hadoop.hdds.security.x509.certificate.authority;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.bouncycastle.asn1.x509.CRLReason;
+import org.bouncycastle.cert.X509CertificateHolder;
import java.io.IOException;
import java.math.BigInteger;
import java.security.cert.X509Certificate;
+import java.util.Date;
import java.util.List;
+import java.util.Optional;
/**
* This interface allows the DefaultCA to be portable and use different DB
@@ -47,12 +51,26 @@ void storeValidCertificate(BigInteger serialID,
X509Certificate certificate) throws IOException;
/**
- * Moves a certificate in a transactional manner from valid certificate to
- * revoked certificate state.
- * @param serialID - Serial ID of the certificate.
- * @throws IOException
+ * Adds the certificates to be revoked to a new CRL and moves all the
+ * certificates in a transactional manner from valid certificate to
+ * revoked certificate state. Returns an empty {@code Optional} instance if
+ * the certificates were invalid / not found / already revoked and no CRL
+ * was generated. Otherwise, returns the newly generated CRL sequence ID.
+ * @param serialIDs - List of Serial IDs of Certificates to be revoked.
+ * @param caCertificateHolder - X509 Certificate Holder of the CA.
+ * @param reason - CRLReason for revocation.
+ * @param revocationTime - Revocation Time for the certificates.
+ * @param approver - CRL approver to sign the CRL.
+ * @return An empty {@code Optional} instance if no CRL was generated.
+ * Otherwise, returns the newly generated CRL sequence ID.
+ * @throws IOException - on failure.
*/
- void revokeCertificate(BigInteger serialID) throws IOException;
+ Optional revokeCertificates(List serialIDs,
+ X509CertificateHolder caCertificateHolder,
+ CRLReason reason,
+ Date revocationTime,
+ CRLApprover approver)
+ throws IOException;
/**
* Deletes an expired certificate from the store. Please note: We don't
@@ -65,9 +83,9 @@ void storeValidCertificate(BigInteger serialID,
/**
* Retrieves a Certificate based on the Serial number of that certificate.
* @param serialID - ID of the certificate.
- * @param certType
+ * @param certType - Whether its Valid or Revoked certificate.
* @return X509Certificate
- * @throws IOException
+ * @throws IOException - on failure.
*/
X509Certificate getCertificateByID(BigInteger serialID, CertType certType)
throws IOException;
@@ -79,7 +97,7 @@ X509Certificate getCertificateByID(BigInteger serialID, CertType certType)
* @param count - max number of certs returned.
* @param certType cert type (valid/revoked).
* @return list of X509 certificates.
- * @throws IOException
+ * @throws IOException - on failure.
*/
List listCertificate(HddsProtos.NodeType role,
BigInteger startSerialID, int count, CertType certType)
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
index 05232092c90c..3b1a47eb9cd8 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java
@@ -21,6 +21,7 @@
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
+import org.apache.commons.collections.CollectionUtils;
import org.apache.commons.validator.routines.DomainValidator;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
@@ -32,6 +33,7 @@
import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;
import org.apache.hadoop.hdds.security.x509.keys.KeyCodec;
import org.apache.hadoop.ozone.OzoneSecurityUtil;
+import org.bouncycastle.asn1.x509.CRLReason;
import org.bouncycastle.cert.X509CertificateHolder;
import org.bouncycastle.operator.OperatorCreationException;
import org.bouncycastle.pkcs.PKCS10CertificationRequest;
@@ -52,12 +54,14 @@
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
+import java.util.Date;
import java.util.List;
+import java.util.Optional;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.Future;
import java.util.function.Consumer;
-import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.*;
+import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getCertificationRequest;
import static org.apache.hadoop.hdds.security.x509.exceptions.CertificateException.ErrorCode.CSR_ERROR;
/**
@@ -121,6 +125,7 @@ public class DefaultCAServer implements CertificateServer {
*/
private PKIProfile profile;
private CertificateApprover approver;
+ private CRLApprover crlApprover;
private CertificateStore store;
/**
@@ -140,7 +145,7 @@ public DefaultCAServer(String subject, String clusterID, String scmID,
@Override
public void init(SecurityConfig securityConfig, CAType type)
- throws SCMSecurityException {
+ throws IOException {
caKeysPath = securityConfig.getKeyLocation(componentName);
caRootX509Path = securityConfig.getCertificateLocation(componentName);
this.config = securityConfig;
@@ -150,7 +155,7 @@ public void init(SecurityConfig securityConfig, CAType type)
profile = new DefaultProfile();
this.approver = new DefaultApprover(profile, this.config);
- /* In future we will spilt this code to have different kind of CAs.
+ /* In future we will split this code to have different kind of CAs.
* Right now, we have only self-signed CertificateServer.
*/
@@ -159,6 +164,8 @@ public void init(SecurityConfig securityConfig, CAType type)
Consumer caInitializer =
processVerificationStatus(status);
caInitializer.accept(securityConfig);
+ crlApprover = new DefaultCRLApprover(securityConfig,
+ getCAKeys().getPrivate());
return;
}
@@ -272,20 +279,25 @@ public Future requestCertificate(String csr,
}
@Override
- public Future revokeCertificate(X509Certificate certificate,
- CertificateApprover.ApprovalType approverType)
- throws SCMSecurityException {
- CompletableFuture revoked = new CompletableFuture<>();
- if (certificate == null) {
+ public Future> revokeCertificates(
+ List certificates,
+ CRLReason reason,
+ Date revocationTime,
+ SecurityConfig securityConfig) {
+ CompletableFuture> revoked = new CompletableFuture<>();
+ if (CollectionUtils.isEmpty(certificates)) {
revoked.completeExceptionally(new SCMSecurityException(
- "Certificate cannot be null"));
+ "Certificates cannot be null or empty"));
return revoked;
}
try {
- store.revokeCertificate(certificate.getSerialNumber());
+ revoked.complete(
+ store.revokeCertificates(certificates,
+ getCACertificate(), reason, revocationTime, crlApprover)
+ );
} catch (IOException ex) {
LOG.error("Revoking the certificate failed.", ex.getCause());
- throw new SCMSecurityException(ex);
+ revoked.completeExceptionally(new SCMSecurityException(ex));
}
return revoked;
}
@@ -347,11 +359,14 @@ private VerificationStatus verifySelfSignedCA(SecurityConfig securityConfig) {
boolean keyStatus = checkIfKeysExist();
boolean certStatus = checkIfCertificatesExist();
+ // Check if both certStatus and keyStatus is set to true and return success.
if ((certStatus == keyStatus) && (certStatus)) {
return VerificationStatus.SUCCESS;
}
- if ((certStatus == keyStatus) && (!certStatus)) {
+ // At this point both certStatus and keyStatus should be false if they
+ // are equal
+ if ((certStatus == keyStatus)) {
return VerificationStatus.INITIALIZE;
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCRLApprover.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCRLApprover.java
new file mode 100644
index 000000000000..683905af6f87
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCRLApprover.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.security.x509.certificate.authority;
+
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+import org.apache.hadoop.hdds.security.x509.certificate.utils.CRLCodec;
+import org.bouncycastle.cert.X509CRLHolder;
+import org.bouncycastle.cert.X509v2CRLBuilder;
+import org.bouncycastle.operator.OperatorCreationException;
+import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
+
+import java.security.PrivateKey;
+import java.security.cert.CRLException;
+import java.security.cert.X509CRL;
+
+/**
+ * Default CRL Approver used by the DefaultCA.
+ */
+public class DefaultCRLApprover implements CRLApprover {
+
+ // Security configuration supplying the signature algorithm and provider.
+ private SecurityConfig config;
+ // CA private key used to sign the generated CRL.
+ private PrivateKey caPrivate;
+
+ public DefaultCRLApprover(SecurityConfig config, PrivateKey caPrivate) {
+ this.config = config;
+ this.caPrivate = caPrivate;
+ }
+
+ /**
+ * Signs the given CRL builder with the CA private key and returns the
+ * resulting CRL.
+ *
+ * @param builder - CRL builder holding the revoked certificate entries.
+ * @return the signed X509CRL.
+ * @throws CRLException - on failure converting the CRL holder.
+ * @throws OperatorCreationException - on failure creating the signer.
+ */
+ @Override
+ public X509CRL sign(X509v2CRLBuilder builder)
+ throws CRLException, OperatorCreationException {
+ JcaContentSignerBuilder contentSignerBuilder =
+ new JcaContentSignerBuilder(config.getSignatureAlgo());
+
+ contentSignerBuilder.setProvider(config.getProvider());
+ X509CRLHolder crlHolder =
+ builder.build(contentSignerBuilder.build(caPrivate));
+
+ // Convert the BouncyCastle holder into the JCA X509CRL representation.
+ return CRLCodec.getX509CRL(crlHolder);
+ }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
new file mode 100644
index 000000000000..f137adce7034
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/CRLInfo.java
@@ -0,0 +1,168 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.security.x509.crl;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
+import org.apache.hadoop.hdds.security.x509.certificate.utils.CRLCodec;
+import org.jetbrains.annotations.NotNull;
+
+import java.io.IOException;
+import java.security.cert.CRLException;
+import java.security.cert.CertificateException;
+import java.security.cert.X509CRL;
+import java.util.Comparator;
+import java.util.Objects;
+
+/**
+ * Class that wraps Certificate Revocation List Info. Instances are ordered
+ * by their creation timestamp.
+ */
+public class CRLInfo implements Comparator,
+ Comparable {
+
+ // The wrapped X.509 CRL.
+ private X509CRL x509CRL;
+ // Time at which this CRL was created (epoch time — TODO confirm units).
+ private long creationTimestamp;
+
+ private CRLInfo(X509CRL x509CRL, long creationTimestamp) {
+ this.x509CRL = x509CRL;
+ this.creationTimestamp = creationTimestamp;
+ }
+
+ /**
+ * No-arg constructor for CRLInfo, needed for serialization (findbugs).
+ * NOTE(review): leaves x509CRL null; see the note in equals below.
+ */
+ public CRLInfo() {
+ }
+
+ /**
+ * Builds a CRLInfo from its protobuf representation.
+ *
+ * @param info - proto holding a PEM-encoded CRL and creation timestamp.
+ * @return CRLInfo built from the proto fields.
+ * @throws IOException - on failure reading the encoded data.
+ * @throws CRLException - on CRL decoding failure.
+ * @throws CertificateException - on certificate decoding failure.
+ */
+ public static CRLInfo fromProtobuf(HddsProtos.CRLInfoProto info)
+ throws IOException, CRLException, CertificateException {
+ CRLInfo.Builder builder = new CRLInfo.Builder();
+ return builder
+ .setX509CRL(CRLCodec.getX509CRL(info.getX509CRL()))
+ .setCreationTimestamp(info.getCreationTimestamp())
+ .build();
+ }
+
+ /**
+ * Converts this CRLInfo to its protobuf representation. The CRL is
+ * serialized as a PEM-encoded string.
+ *
+ * @return protobuf message with the PEM-encoded CRL and timestamp.
+ * @throws SCMSecurityException - on failure to PEM-encode the CRL.
+ */
+ public HddsProtos.CRLInfoProto getProtobuf() throws SCMSecurityException {
+ HddsProtos.CRLInfoProto.Builder builder =
+ HddsProtos.CRLInfoProto.newBuilder();
+
+ return builder.setX509CRL(CRLCodec.getPEMEncodedString(getX509CRL()))
+ .setCreationTimestamp(getCreationTimestamp())
+ .build();
+ }
+
+ public X509CRL getX509CRL() {
+ return x509CRL;
+ }
+
+ public long getCreationTimestamp() {
+ return creationTimestamp;
+ }
+
+ /**
+ * Compares this object with the specified object for order. Returns a
+ * negative integer, zero, or a positive integer as this object is less
+ * than, equal to, or greater than the specified object.
+ *
+ * @param o the object to be compared.
+ * @return a negative integer, zero, or a positive integer as this object
+ * is less than, equal to, or greater than the specified object.
+ * @throws NullPointerException if the specified object is null
+ * @throws ClassCastException if the specified object's type prevents it
+ * from being compared to this object.
+ */
+ @Override
+ public int compareTo(@NotNull CRLInfo o) {
+ return this.compare(this, o);
+ }
+
+ /**
+ * Compares its two arguments for order. Returns a negative integer,
+ * zero, or a positive integer as the first argument is less than, equal
+ * to, or greater than the second. Ordering is by creation timestamp.
+ *
+ *
+ * @param o1 the first object to be compared.
+ * @param o2 the second object to be compared.
+ * @return a negative integer, zero, or a positive integer as the
+ * first argument is less than, equal to, or greater than the
+ * second.
+ * @throws NullPointerException if an argument is null and this
+ * comparator does not permit null arguments
+ * @throws ClassCastException if the arguments' types prevent them from
+ * being compared by this comparator.
+ */
+ @Override
+ public int compare(CRLInfo o1, CRLInfo o2) {
+ return Long.compare(o1.getCreationTimestamp(), o2.getCreationTimestamp());
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+
+ CRLInfo that = (CRLInfo) o;
+
+ // NOTE(review): throws NPE if this instance was created via the no-arg
+ // constructor (x509CRL == null) — confirm callers never compare such
+ // instances.
+ return this.getX509CRL().equals(that.x509CRL) &&
+ this.creationTimestamp == that.creationTimestamp;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(getX509CRL(), getCreationTimestamp());
+ }
+
+ @Override
+ public String toString() {
+ return "CRLInfo{" +
+ "x509CRL=" + x509CRL.toString() +
+ ", creationTimestamp=" + creationTimestamp +
+ '}';
+ }
+
+ /**
+ * Builder class for CRLInfo.
+ */
+ @SuppressWarnings("checkstyle:hiddenfield")
+ public static class Builder {
+ private X509CRL x509CRL;
+ private long creationTimestamp;
+
+ public Builder setX509CRL(X509CRL x509CRL) {
+ this.x509CRL = x509CRL;
+ return this;
+ }
+
+ public Builder setCreationTimestamp(long creationTimestamp) {
+ this.creationTimestamp = creationTimestamp;
+ return this;
+ }
+
+ public CRLInfo build() {
+ return new CRLInfo(x509CRL, creationTimestamp);
+ }
+ }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/package-info.java
new file mode 100644
index 000000000000..9f768a326337
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/crl/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Classes related to Certificate Revocation List (CRL) handling.
+ */
+package org.apache.hadoop.hdds.security.x509.crl;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/MultipartCryptoKeyInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/MultipartCryptoKeyInputStream.java
new file mode 100644
--- /dev/null
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/MultipartCryptoKeyInputStream.java
+package org.apache.hadoop.ozone.client.io;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.crypto.CryptoInputStream;
+import org.apache.hadoop.fs.CanUnbuffer;
+import org.apache.hadoop.fs.FSExceptionMessages;
+import org.apache.hadoop.fs.Seekable;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.EOFException;
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
+
+/**
+ * A seekable input stream over a multipart key stored in an encrypted
+ * bucket. Wraps one OzoneCryptoInputStream per part and presents the
+ * parts as a single contiguous stream, taking care to only read at
+ * Crypto buffer boundaries so decryption stays correct.
+ */
+public class MultipartCryptoKeyInputStream extends OzoneInputStream
+ implements Seekable, CanUnbuffer {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(MultipartCryptoKeyInputStream.class);
+
+ private static final int EOF = -1;
+
+ // Name of the key being read; used in error and log messages.
+ private String key;
+ // Total length of the key data, i.e. the sum of all part lengths.
+ private long length = 0L;
+ private boolean closed = false;
+
+ // List of OzoneCryptoInputStream, one for each part of the key
+ private List partStreams;
+
+ // partOffsets[i] stores the index of the first data byte in
+ // partStream[i] w.r.t. the whole key data.
+ // For example, let’s say the part size is 200 bytes and part[0] stores
+ // data from indices 0 - 199, part[1] from indices 200 - 399 and so on.
+ // Then, partOffsets[0] = 0 (the offset of the first byte of data in
+ // part[0]), partOffsets[1] = 200 and so on.
+ private long[] partOffsets;
+
+ // Index of the partStream corresponding to the current position of the
+ // MultipartCryptoKeyInputStream.
+ private int partIndex = 0;
+
+ // Tracks the partIndex corresponding to the last seeked position so that it
+ // can be reset if a new position is seeked.
+ private int prevPartIndex = 0;
+
+ // If a read's start/ length position doesn't coincide with a Crypto buffer
+ // boundary, it will be adjusted as reads should happen only at the buffer
+ // boundaries for decryption to happen correctly. In this case, after the
+ // data has been read and decrypted, only the requested data should be
+ // returned to the client. readPositionAdjustedBy and readLengthAdjustedBy
+ // store these adjustment information. Before returning to client, the first
+ // readPositionAdjustedBy number of bytes and the last readLengthAdjustedBy
+ // number of bytes must be discarded.
+ private int readPositionAdjustedBy = 0;
+ private int readLengthAdjustedBy = 0;
+
+ public MultipartCryptoKeyInputStream(String keyName,
+ List inputStreams) {
+
+ Preconditions.checkNotNull(inputStreams);
+
+ this.key = keyName;
+ this.partStreams = inputStreams;
+
+ // Calculate and update the partOffsets
+ this.partOffsets = new long[inputStreams.size()];
+ int i = 0;
+ for (OzoneCryptoInputStream ozoneCryptoInputStream : inputStreams) {
+ this.partOffsets[i++] = length;
+ length += ozoneCryptoInputStream.getLength();
+ }
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public int read() throws IOException {
+ // Delegate to the bulk read so all boundary adjustments apply.
+ byte[] buf = new byte[1];
+ if (read(buf, 0, 1) == EOF) {
+ return EOF;
+ }
+ return Byte.toUnsignedInt(buf[0]);
+ }
+
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ checkOpen();
+ if (b == null) {
+ throw new NullPointerException();
+ }
+ if (off < 0 || len < 0 || len > b.length - off) {
+ throw new IndexOutOfBoundsException();
+ }
+ if (len == 0) {
+ return 0;
+ }
+ int totalReadLen = 0;
+ while (len > 0) {
+ // EOF when there are no parts, or we are on the last part and it
+ // has no bytes remaining.
+ if (partStreams.size() == 0 ||
+ (partStreams.size() - 1 <= partIndex &&
+ partStreams.get(partIndex).getRemaining() == 0)) {
+ return totalReadLen == 0 ? EOF : totalReadLen;
+ }
+
+ // Get the current partStream and read data from it
+ OzoneCryptoInputStream current = partStreams.get(partIndex);
+ // CryptoInputStream reads hadoop.security.crypto.buffer.size number of
+ // bytes (default 8KB) at a time. This needs to be taken into account
+ // in calculating the numBytesToRead.
+ int numBytesToRead = getNumBytesToRead(len, (int)current.getRemaining(),
+ current.getBufferSize());
+ int numBytesRead;
+
+ if (readPositionAdjustedBy != 0 || readLengthAdjustedBy != 0) {
+ // There was some adjustment made in position and/ or length of data
+ // to be read to account for Crypto buffer boundary. Hence, read the
+ // data into a temp buffer and then copy only the requested data into
+ // clients buffer.
+ byte[] tempBuffer = new byte[numBytesToRead];
+ int actualNumBytesRead = current.read(tempBuffer, 0,
+ numBytesToRead);
+ numBytesRead = actualNumBytesRead - readPositionAdjustedBy -
+ readLengthAdjustedBy;
+
+ if (actualNumBytesRead != numBytesToRead) {
+ throw new IOException(String.format("Inconsistent read for key=%s " +
+ "part=%d length=%d numBytesToRead(accounting for Crypto " +
+ "boundaries)=%d numBytesRead(actual)=%d " +
+ "numBytesToBeRead(into client buffer discarding crypto " +
+ "boundary adjustments)=%d",
+ key, partIndex, current.getLength(), numBytesToRead,
+ actualNumBytesRead, numBytesRead));
+ }
+
+ // TODO: Byte array copies are not optimal. If there is a better and
+ // more optimal solution to copy only a part of read data into
+ // client buffer, this should be updated.
+ System.arraycopy(tempBuffer, readPositionAdjustedBy, b, off,
+ numBytesRead);
+
+ // NOTE(review): format string lacks a space before "upto".
+ LOG.debug("OzoneCryptoInputStream for key: {} part: {} read {} bytes " +
+ "instead of {} bytes to account for Crypto buffer boundary. " +
+ "Client buffer will be copied with read data from position {}" +
+ "upto position {}, discarding the extra bytes read to " +
+ "maintain Crypto buffer boundary limits", key, partIndex,
+ actualNumBytesRead, numBytesRead, readPositionAdjustedBy,
+ actualNumBytesRead - readPositionAdjustedBy);
+
+ // Reset readPositionAdjustedBy and readLengthAdjustedBy
+ readPositionAdjustedBy = 0;
+ readLengthAdjustedBy = 0;
+ } else {
+ numBytesRead = current.read(b, off, numBytesToRead);
+ if (numBytesRead != numBytesToRead) {
+ throw new IOException(String.format("Inconsistent read for key=%s " +
+ "part=%d length=%d numBytesToRead=%d numBytesRead=%d",
+ key, partIndex, current.getLength(), numBytesToRead,
+ numBytesRead));
+ }
+ }
+
+ totalReadLen += numBytesRead;
+ off += numBytesRead;
+ len -= numBytesRead;
+
+ // Current part exhausted; advance to the next part, if any.
+ if (current.getRemaining() <= 0 &&
+ ((partIndex + 1) < partStreams.size())) {
+ partIndex += 1;
+ }
+
+ }
+ return totalReadLen;
+ }
+
+ /**
+ * Get number of bytes to read from the current stream based on the length
+ * to be read, number of bytes remaining in the stream and the Crypto buffer
+ * size.
+ * Reads should be performed at the CryptoInputStream Buffer boundaries only.
+ * Otherwise, the decryption will be incorrect.
+ */
+ private int getNumBytesToRead(int lenToRead, int remaining,
+ int cryptoBufferSize) throws IOException {
+
+ Preconditions.checkArgument(readPositionAdjustedBy == 0);
+ Preconditions.checkArgument(readLengthAdjustedBy == 0);
+
+ // Check and adjust position if required
+ adjustReadPosition(cryptoBufferSize);
+ remaining += readPositionAdjustedBy;
+ lenToRead += readPositionAdjustedBy;
+
+ return adjustNumBytesToRead(lenToRead, remaining, cryptoBufferSize);
+ }
+
+ /**
+ * Reads should be performed at the CryptoInputStream Buffer boundary size.
+ * Otherwise, the decryption will be incorrect. Hence, if the position is
+ * not at the boundary limit, we have to adjust the position and might need
+ * to read more data than requested. The extra data will be filtered out
+ * before returning to the client.
+ */
+ private void adjustReadPosition(long cryptoBufferSize) throws IOException {
+ // Position of the buffer in current stream
+ long currentPosOfStream = partStreams.get(partIndex).getPos();
+ int modulus = (int) (currentPosOfStream % cryptoBufferSize);
+ if (modulus != 0) {
+ // Adjustment required.
+ // Update readPositionAdjustedBy and seek to the adjusted position
+ readPositionAdjustedBy = modulus;
+ // Seek current partStream to adjusted position. We do not need to
+ // reset the seeked positions of other streams.
+ partStreams.get(partIndex)
+ .seek(currentPosOfStream - readPositionAdjustedBy);
+ LOG.debug("OzoneCryptoInputStream for key: {} part: {} adjusted " +
+ "position {} by -{} to account for Crypto buffer boundary",
+ key, partIndex, currentPosOfStream, readPositionAdjustedBy);
+ }
+ }
+
+ /**
+ * If the length of data requested does not end at a Crypto Buffer
+ * boundary, the number of bytes to be read must be adjusted accordingly.
+ * The extra data will be filtered out before returning to the client.
+ */
+ private int adjustNumBytesToRead(int lenToRead, int remaining,
+ int cryptoBufferSize) {
+ int numBytesToRead = Math.min(cryptoBufferSize, remaining);
+ if (lenToRead < numBytesToRead) {
+ // Adjustment required; Update readLengthAdjustedBy.
+ readLengthAdjustedBy = numBytesToRead - lenToRead;
+ LOG.debug("OzoneCryptoInputStream for key: {} part: {} adjusted length " +
+ "by +{} to account for Crypto buffer boundary",
+ key, partIndex, readLengthAdjustedBy);
+ }
+ return numBytesToRead;
+ }
+
+ /**
+ * Seeks the InputStream to the specified position. This involves 2 steps:
+ * 1. Updating the partIndex to the partStream corresponding to the
+ * seeked position.
+ * 2. Seeking the corresponding partStream to the adjusted position.
+ *
+ * For example, let’s say the part sizes are 200 bytes and part[0] stores
+ * data from indices 0 - 199, part[1] from indices 200 - 399 and so on.
+ * Let’s say we seek to position 240. In the first step, the partIndex
+ * would be updated to 1 as indices 200 - 399 reside in partStream[1]. In
+ * the second step, the partStream[1] would be seeked to position 40 (=
+ * 240 - partOffsets[1] (= 200)).
+ */
+ @Override
+ public void seek(long pos) throws IOException {
+ if (pos == 0 && length == 0) {
+ // It is possible for length and pos to be zero in which case
+ // seek should return instead of throwing exception
+ return;
+ }
+ if (pos < 0 || pos > length) {
+ throw new EOFException("EOF encountered at pos: " + pos);
+ }
+
+ // 1. Update the partIndex
+ if (partIndex >= partStreams.size()) {
+ partIndex = Arrays.binarySearch(partOffsets, pos);
+ } else if (pos < partOffsets[partIndex]) {
+ partIndex =
+ Arrays.binarySearch(partOffsets, 0, partIndex, pos);
+ } else if (pos >= partOffsets[partIndex] + partStreams
+ .get(partIndex).getLength()) {
+ partIndex = Arrays.binarySearch(partOffsets, partIndex + 1,
+ partStreams.size(), pos);
+ }
+ if (partIndex < 0) {
+ // Binary search returns -insertionPoint - 1 if element is not present
+ // in the array. insertionPoint is the point at which element would be
+ // inserted in the sorted array. We need to adjust the partIndex
+ // accordingly so that partIndex = insertionPoint - 1
+ partIndex = -partIndex - 2;
+ }
+
+ // Reset the previous partStream's position
+ partStreams.get(prevPartIndex).seek(0);
+
+ // Reset all the partStreams above the partIndex. We do this to reset
+ // any previous reads which might have updated the higher part
+ // streams position.
+ for (int index = partIndex + 1; index < partStreams.size(); index++) {
+ partStreams.get(index).seek(0);
+ }
+ // 2. Seek the partStream to the adjusted position
+ partStreams.get(partIndex).seek(pos - partOffsets[partIndex]);
+ prevPartIndex = partIndex;
+ }
+
+ @Override
+ public synchronized long getPos() throws IOException {
+ checkOpen();
+ return length == 0 ? 0 : partOffsets[partIndex] +
+ partStreams.get(partIndex).getPos();
+ }
+
+ @Override
+ public boolean seekToNewSource(long targetPos) throws IOException {
+ return false;
+ }
+
+ @Override
+ public int available() throws IOException {
+ checkOpen();
+ long remaining = length - getPos();
+ return remaining <= Integer.MAX_VALUE ? (int) remaining : Integer.MAX_VALUE;
+ }
+
+ @Override
+ public void unbuffer() {
+ for (CryptoInputStream cryptoInputStream : partStreams) {
+ cryptoInputStream.unbuffer();
+ }
+ }
+
+ @Override
+ public long skip(long n) throws IOException {
+ if (n <= 0) {
+ return 0;
+ }
+
+ // Skip is implemented as a bounded seek forward.
+ long toSkip = Math.min(n, length - getPos());
+ seek(getPos() + toSkip);
+ return toSkip;
+ }
+
+ @Override
+ public synchronized void close() throws IOException {
+ closed = true;
+ for (OzoneCryptoInputStream partStream : partStreams) {
+ partStream.close();
+ }
+ }
+
+ /**
+ * Verify that the input stream is open. Non blocking; this gives
+ * the last state of the {@link #closed} field (not declared volatile).
+ * @throws IOException if the connection is closed.
+ */
+ private void checkOpen() throws IOException {
+ if (closed) {
+ throw new IOException(
+ ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: " + key);
+ }
+ }
+}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneCryptoInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneCryptoInputStream.java
new file mode 100644
index 000000000000..9d5d888688f4
--- /dev/null
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneCryptoInputStream.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.client.io;
+
+import java.io.IOException;
+import org.apache.hadoop.crypto.CryptoCodec;
+import org.apache.hadoop.crypto.CryptoInputStream;
+import org.apache.hadoop.crypto.CryptoStreamUtils;
+import org.apache.hadoop.fs.Seekable;
+
+/**
+ * A CryptoInputStream for Ozone with length. This stream is used to read
+ * Keys in Encrypted Buckets.
+ */
+public class OzoneCryptoInputStream extends CryptoInputStream
+ implements Seekable {
+
+ // Total number of data bytes in the wrapped stream.
+ private final long length;
+ // Crypto buffer size used internally by CryptoInputStream.
+ private final int bufferSize;
+
+ /**
+ * @param in - length-aware stream wrapping the raw key data.
+ * @param codec - crypto codec used for decryption.
+ * @param key - data encryption key material.
+ * @param iv - initialization vector.
+ * @throws IOException - on failure constructing the crypto stream.
+ */
+ public OzoneCryptoInputStream(LengthInputStream in,
+ CryptoCodec codec, byte[] key, byte[] iv) throws IOException {
+ super(in.getWrappedStream(), codec, key, iv);
+ this.length = in.getLength();
+ // This is the buffer size used while creating the CryptoInputStream
+ // internally
+ this.bufferSize = CryptoStreamUtils.getBufferSize(codec.getConf());
+ }
+
+ public long getLength() {
+ return length;
+ }
+
+ public int getBufferSize() {
+ return bufferSize;
+ }
+
+ /**
+ * Returns the number of unread bytes, i.e. length minus current position.
+ */
+ public long getRemaining() throws IOException {
+ return length - getPos();
+ }
+}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
index f01975c94d8e..fb3928d071eb 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneInputStream.java
@@ -30,6 +30,9 @@ public class OzoneInputStream extends InputStream implements CanUnbuffer {
private final InputStream inputStream;
+ public OzoneInputStream() {
+ inputStream = null;
+ }
/**
* Constructs OzoneInputStream with KeyInputStream.
*
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
index e4a7d6a100ce..bf88b6fd38de 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.ozone.client.io;
+import org.apache.hadoop.crypto.CryptoOutputStream;
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import java.io.IOException;
@@ -63,6 +64,12 @@ public synchronized void close() throws IOException {
public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
if (outputStream instanceof KeyOutputStream) {
return ((KeyOutputStream) outputStream).getCommitUploadPartInfo();
+ } else if (outputStream instanceof CryptoOutputStream) {
+ OutputStream wrappedStream =
+ ((CryptoOutputStream) outputStream).getWrappedStream();
+ if (wrappedStream instanceof KeyOutputStream) {
+ return ((KeyOutputStream) wrappedStream).getCommitUploadPartInfo();
+ }
}
// Otherwise return null.
return null;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index d23560c07dae..ed85a32adb13 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -67,6 +67,8 @@
import org.apache.hadoop.ozone.client.io.KeyInputStream;
import org.apache.hadoop.ozone.client.io.KeyOutputStream;
import org.apache.hadoop.ozone.client.io.LengthInputStream;
+import org.apache.hadoop.ozone.client.io.MultipartCryptoKeyInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneCryptoInputStream;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
@@ -942,7 +944,17 @@ public OzoneOutputStream createMultipartKey(String volumeName,
keyOutputStream.addPreallocateBlocks(
openKey.getKeyInfo().getLatestVersionLocations(),
openKey.getOpenVersion());
- return new OzoneOutputStream(keyOutputStream);
+ FileEncryptionInfo feInfo = keyOutputStream.getFileEncryptionInfo();
+ if (feInfo != null) {
+ KeyProvider.KeyVersion decrypted = getDEK(feInfo);
+ final CryptoOutputStream cryptoOut =
+ new CryptoOutputStream(keyOutputStream,
+ OzoneKMSUtil.getCryptoCodec(conf, feInfo),
+ decrypted.getMaterial(), feInfo.getIV());
+ return new OzoneOutputStream(cryptoOut);
+ } else {
+ return new OzoneOutputStream(keyOutputStream);
+ }
}
@Override
@@ -1192,23 +1204,18 @@ public List getAcl(OzoneObj obj) throws IOException {
private OzoneInputStream createInputStream(
OmKeyInfo keyInfo, Function retryFunction)
throws IOException {
- LengthInputStream lengthInputStream = KeyInputStream
- .getFromOmKeyInfo(keyInfo, xceiverClientManager,
- clientConfig.isChecksumVerify(), retryFunction);
+ // When Key is not MPU or when Key is MPU and encryption is not enabled
+ // Need to revisit for GDP.
FileEncryptionInfo feInfo = keyInfo.getFileEncryptionInfo();
- if (feInfo != null) {
- final KeyProvider.KeyVersion decrypted = getDEK(feInfo);
- final CryptoInputStream cryptoIn =
- new CryptoInputStream(lengthInputStream.getWrappedStream(),
- OzoneKMSUtil.getCryptoCodec(conf, feInfo),
- decrypted.getMaterial(), feInfo.getIV());
- return new OzoneInputStream(cryptoIn);
- } else {
- try{
- GDPRSymmetricKey gk;
- Map keyInfoMetadata = keyInfo.getMetadata();
- if(Boolean.valueOf(keyInfoMetadata.get(OzoneConsts.GDPR_FLAG))){
- gk = new GDPRSymmetricKey(
+
+ if (feInfo == null) {
+ LengthInputStream lengthInputStream = KeyInputStream
+ .getFromOmKeyInfo(keyInfo, xceiverClientManager,
+ clientConfig.isChecksumVerify(), retryFunction);
+ try {
+ Map< String, String > keyInfoMetadata = keyInfo.getMetadata();
+ if (Boolean.valueOf(keyInfoMetadata.get(OzoneConsts.GDPR_FLAG))) {
+ GDPRSymmetricKey gk = new GDPRSymmetricKey(
keyInfoMetadata.get(OzoneConsts.GDPR_SECRET),
keyInfoMetadata.get(OzoneConsts.GDPR_ALGORITHM)
);
@@ -1216,11 +1223,39 @@ private OzoneInputStream createInputStream(
return new OzoneInputStream(
new CipherInputStream(lengthInputStream, gk.getCipher()));
}
- }catch (Exception ex){
+ } catch (Exception ex) {
throw new IOException(ex);
}
+ return new OzoneInputStream(lengthInputStream.getWrappedStream());
+ } else if (!keyInfo.getLatestVersionLocations().isMultipartKey()) {
+ // Regular Key with FileEncryptionInfo
+ LengthInputStream lengthInputStream = KeyInputStream
+ .getFromOmKeyInfo(keyInfo, xceiverClientManager,
+ clientConfig.isChecksumVerify(), retryFunction);
+ final KeyProvider.KeyVersion decrypted = getDEK(feInfo);
+ final CryptoInputStream cryptoIn =
+ new CryptoInputStream(lengthInputStream.getWrappedStream(),
+ OzoneKMSUtil.getCryptoCodec(conf, feInfo),
+ decrypted.getMaterial(), feInfo.getIV());
+ return new OzoneInputStream(cryptoIn);
+ } else {
+ // Multipart Key with FileEncryptionInfo
+ List lengthInputStreams = KeyInputStream
+ .getStreamsFromKeyInfo(keyInfo, xceiverClientManager,
+ clientConfig.isChecksumVerify(), retryFunction);
+ final KeyProvider.KeyVersion decrypted = getDEK(feInfo);
+
+ List cryptoInputStreams = new ArrayList<>();
+ for(LengthInputStream lengthInputStream : lengthInputStreams) {
+ final OzoneCryptoInputStream ozoneCryptoInputStream =
+ new OzoneCryptoInputStream(lengthInputStream,
+ OzoneKMSUtil.getCryptoCodec(conf, feInfo),
+ decrypted.getMaterial(), feInfo.getIV());
+ cryptoInputStreams.add(ozoneCryptoInputStream);
+ }
+ return new MultipartCryptoKeyInputStream(keyInfo.getKeyName(),
+ cryptoInputStreams);
}
- return new OzoneInputStream(lengthInputStream.getWrappedStream());
}
private OzoneOutputStream createOutputStream(OpenKeySession openKey,
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index 648176860cb7..43213a98490f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -145,9 +145,12 @@ public void updateModifcationTime() {
*
* @param locationInfoList list of locationInfo
*/
- public void updateLocationInfoList(List locationInfoList) {
+ public void updateLocationInfoList(List locationInfoList,
+ boolean isMpu) {
long latestVersion = getLatestVersionLocations().getVersion();
OmKeyLocationInfoGroup keyLocationInfoGroup = getLatestVersionLocations();
+
+ keyLocationInfoGroup.setMultipartKey(isMpu);
// Updates the latest locationList in the latest version only with
// given locationInfoList here.
// TODO : The original allocated list and the updated list here may vary
@@ -161,6 +164,8 @@ public void updateLocationInfoList(List locationInfoList) {
keyLocationInfoGroup.addAll(latestVersion, locationInfoList);
}
+
+
/**
* Append a set of blocks to the latest version. Note that these blocks are
* part of the latest version, not a new version.
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
index 4eac8bec7093..d1a721a476fa 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfo.java
@@ -42,21 +42,26 @@ public final class OmKeyLocationInfo {
private Pipeline pipeline;
+ // PartNumber is set for Multipart upload Keys.
+ private int partNumber = -1;
+
private OmKeyLocationInfo(BlockID blockID, Pipeline pipeline, long length,
- long offset) {
+ long offset, int partNumber) {
this.blockID = blockID;
this.pipeline = pipeline;
this.length = length;
this.offset = offset;
+ this.partNumber = partNumber;
}
private OmKeyLocationInfo(BlockID blockID, Pipeline pipeline, long length,
- long offset, Token token) {
+ long offset, Token token, int partNumber) {
this.blockID = blockID;
this.pipeline = pipeline;
this.length = length;
this.offset = offset;
this.token = token;
+ this.partNumber = partNumber;
}
public void setCreateVersion(long version) {
@@ -111,6 +116,14 @@ public void setPipeline(Pipeline pipeline) {
this.pipeline = pipeline;
}
+ public void setPartNumber(int partNumber) {
+ this.partNumber = partNumber;
+ }
+
+ public int getPartNumber() {
+ return partNumber;
+ }
+
/**
* Builder of OmKeyLocationInfo.
*/
@@ -120,6 +133,7 @@ public static class Builder {
private long offset;
private Token token;
private Pipeline pipeline;
+ private int partNumber;
public Builder setBlockID(BlockID blockId) {
this.blockID = blockId;
@@ -147,8 +161,14 @@ public Builder setToken(Token bToken) {
return this;
}
+ public Builder setPartNumber(int partNum) {
+ this.partNumber = partNum;
+ return this;
+ }
+
public OmKeyLocationInfo build() {
- return new OmKeyLocationInfo(blockID, pipeline, length, offset, token);
+ return new OmKeyLocationInfo(blockID, pipeline, length, offset, token,
+ partNumber);
}
}
@@ -161,7 +181,7 @@ public KeyLocation getProtobuf(boolean ignorePipeline, int clientVersion) {
.setBlockID(blockID.getProtobuf())
.setLength(length)
.setOffset(offset)
- .setCreateVersion(createVersion);
+ .setCreateVersion(createVersion).setPartNumber(partNumber);
if (this.token != null) {
builder.setToken(OzonePBHelper.protoFromToken(token));
}
@@ -189,7 +209,7 @@ public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) {
BlockID.getFromProtobuf(keyLocation.getBlockID()),
getPipeline(keyLocation),
keyLocation.getLength(),
- keyLocation.getOffset());
+ keyLocation.getOffset(), keyLocation.getPartNumber());
if(keyLocation.hasToken()) {
info.token = (Token)
OzonePBHelper.tokenFromProto(keyLocation.getToken());
@@ -199,14 +219,15 @@ public static OmKeyLocationInfo getFromProtobuf(KeyLocation keyLocation) {
}
@Override
- public String toString() {
+ public String toString() {
return "{blockID={containerID=" + blockID.getContainerID() +
", localID=" + blockID.getLocalID() + "}" +
", length=" + length +
", offset=" + offset +
", token=" + token +
", pipeline=" + pipeline +
- ", createVersion=" + createVersion + '}';
+ ", createVersion=" + createVersion + ", partNumber=" + partNumber
+ + '}';
}
@Override
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
index 440b1cb3f9ef..a93bcf24e441 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyLocationInfoGroup.java
@@ -32,9 +32,15 @@
public class OmKeyLocationInfoGroup {
private final long version;
private final Map> locationVersionMap;
+ private boolean isMultipartKey;
public OmKeyLocationInfoGroup(long version,
List locations) {
+ this(version, locations, false);
+ }
+
+ public OmKeyLocationInfoGroup(long version,
+ List locations, boolean isMultipartKey) {
this.version = version;
locationVersionMap = new HashMap<>();
for (OmKeyLocationInfo info : locations) {
@@ -44,14 +50,30 @@ public OmKeyLocationInfoGroup(long version,
}
//prevent NPE
this.locationVersionMap.putIfAbsent(version, new ArrayList<>());
+ this.isMultipartKey = isMultipartKey;
+
}
public OmKeyLocationInfoGroup(long version,
Map> locations) {
+ this(version, locations, false);
+ }
+
+ public OmKeyLocationInfoGroup(long version,
+ Map> locations, boolean isMultipartKey) {
this.version = version;
this.locationVersionMap = locations;
//prevent NPE
this.locationVersionMap.putIfAbsent(version, new ArrayList<>());
+ this.isMultipartKey = isMultipartKey;
+ }
+
+ public void setMultipartKey(boolean isMpu) {
+ this.isMultipartKey = isMpu;
+ }
+
+ public boolean isMultipartKey() {
+ return isMultipartKey;
}
/**
@@ -83,7 +105,7 @@ public List getLocationList(Long versionToFetch) {
public KeyLocationList getProtobuf(boolean ignorePipeline,
int clientVersion) {
KeyLocationList.Builder builder = KeyLocationList.newBuilder()
- .setVersion(version);
+ .setVersion(version).setIsMultipartKey(isMultipartKey);
List keyLocationList =
new ArrayList<>();
for (List locationList : locationVersionMap.values()) {
@@ -100,7 +122,9 @@ public static OmKeyLocationInfoGroup getFromProtobuf(
keyLocationList.getVersion(),
keyLocationList.getKeyLocationsList().stream()
.map(OmKeyLocationInfo::getFromProtobuf)
- .collect(Collectors.groupingBy(OmKeyLocationInfo::getCreateVersion))
+ .collect(Collectors.groupingBy(
+ OmKeyLocationInfo::getCreateVersion)),
+ keyLocationList.getIsMultipartKey()
);
}
@@ -141,6 +165,7 @@ void addAll(long versionToAdd, List locationInfoList) {
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("version:").append(version).append(" ");
+ sb.append("isMultipartKey:").append(isMultipartKey);
for (List kliList : locationVersionMap.values()) {
for(OmKeyLocationInfo kli: kliList) {
sb.append(kli.getLocalID()).append(" || ");
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
index d7d49bef0d45..cbb7f9a7d0f5 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFileStatus.java
@@ -23,7 +23,6 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneFileStatusProto.Builder;
-import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
/**
@@ -94,14 +93,13 @@ public boolean isFile() {
return !isDirectory();
}
- public OzoneFileStatusProto getProtobuf() {
-
+ public OzoneFileStatusProto getProtobuf(int clientVersion) {
Builder builder = OzoneFileStatusProto.newBuilder()
.setBlockSize(blockSize)
.setIsDirectory(isDirectory);
//key info can be null for the fake root entry.
if (keyInfo != null) {
- builder.setKeyInfo(keyInfo.getProtobuf(CURRENT_VERSION));
+ builder.setKeyInfo(keyInfo.getProtobuf(clientVersion));
}
return builder.build();
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
index fc46059d9619..20bc779bd6bf 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java
@@ -656,6 +656,8 @@ public void commitKey(OmKeyArgs args, long clientId)
.setBucketName(args.getBucketName())
.setKeyName(args.getKeyName())
.setDataSize(args.getDataSize())
+ .setType(args.getType())
+ .setFactor(args.getFactor())
.addAllKeyLocations(locationInfoList.stream()
// TODO use OM version?
.map(info -> info.getProtobuf(CURRENT_VERSION))
diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java
index 0ef69c14be65..015f9f1b7d0d 100644
--- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java
+++ b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java
@@ -72,7 +72,7 @@ public Void call() throws Exception {
.channelType(EpollServerDomainSocketChannel.class)
.workerEventLoopGroup(group)
.bossEventLoopGroup(group)
- .addService(new IdentitiyService())
+ .addService(new IdentityService())
.addService(new ControllerService(rpcClient,
csiConfig.getDefaultVolumeSize()))
.addService(new NodeService(csiConfig))
diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentityService.java
similarity index 97%
rename from hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java
rename to hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentityService.java
index 5a0c4c8ba8a0..8e997b4fbe44 100644
--- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentitiyService.java
+++ b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/IdentityService.java
@@ -32,7 +32,7 @@
/**
* Implementation of the CSI identity service.
*/
-public class IdentitiyService extends IdentityImplBase {
+public class IdentityService extends IdentityImplBase {
@Override
public void getPluginInfo(csi.v1.Csi.GetPluginInfoRequest request,
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml
index 0dab683a991d..7555a793b006 100644
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml
+++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml
@@ -18,7 +18,7 @@
-
+
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml
index d44746f931ed..578982414f46 100644
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml
+++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml
@@ -18,7 +18,7 @@
-
+
diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml
index c1da927cd4d0..5454799d12ee 100644
--- a/hadoop-ozone/dist/pom.xml
+++ b/hadoop-ozone/dist/pom.xml
@@ -28,7 +28,7 @@
UTF-8true
- 20200625-1
+ 20210226-1
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh
index c52034899593..4e0151be9b96 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozone-mr/test.sh
@@ -17,7 +17,7 @@
SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )
ALL_RESULT_DIR="$SCRIPT_DIR/result"
mkdir -p "$ALL_RESULT_DIR"
-rm "$ALL_RESULT_DIR/*" || true
+rm "$ALL_RESULT_DIR"/* || true
source "$SCRIPT_DIR/../testlib.sh"
tests=$(find_tests)
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
index 603427ed10b2..c8d3643e9736 100644
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/docker-config
@@ -59,6 +59,11 @@ OZONE-SITE.XML_ozone.om.kerberos.keytab.file=/etc/security/keytabs/om.keytab
OZONE-SITE.XML_ozone.recon.kerberos.keytab.file=/etc/security/keytabs/recon.keytab
OZONE-SITE.XML_ozone.recon.kerberos.principal=recon/recon@EXAMPLE.COM
+OZONE-SITE.XML_hdds.scm.replication.thread.interval=5s
+OZONE-SITE.XML_hdds.scm.replication.event.timeout=10s
+OZONE-SITE.XML_ozone.scm.stale.node.interval=30s
+OZONE-SITE.XML_ozone.scm.dead.node.interval=45s
+
HDFS-SITE.XML_dfs.datanode.kerberos.principal=dn/_HOST@EXAMPLE.COM
HDFS-SITE.XML_dfs.datanode.keytab.file=/etc/security/keytabs/dn.keytab
HDFS-SITE.XML_dfs.web.authentication.kerberos.principal=HTTP/_HOST@EXAMPLE.COM
diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
index eeccb849eedc..9150b244b61b 100755
--- a/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test.sh
@@ -52,9 +52,14 @@ done
execute_robot_test scm recon
execute_robot_test scm admincli
-
execute_robot_test scm spnego
+# test replication
+docker-compose up -d --scale datanode=2
+execute_robot_test scm -v container:1 -v count:2 replication/wait.robot
+docker-compose up -d --scale datanode=3
+execute_robot_test scm -v container:1 -v count:3 replication/wait.robot
+
stop_docker_env
generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/restart/test.sh b/hadoop-ozone/dist/src/main/compose/restart/test.sh
index 41e06c5ac816..cf0f53242da5 100644
--- a/hadoop-ozone/dist/src/main/compose/restart/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/restart/test.sh
@@ -23,19 +23,12 @@ export COMPOSE_DIR
export OZONE_VOLUME
-mkdir -p "${OZONE_VOLUME}"/{dn1,dn2,dn3,om,recon,s3g,scm}
-
-if [[ -n "${OZONE_VOLUME_OWNER}" ]]; then
- current_user=$(whoami)
- if [[ "${OZONE_VOLUME_OWNER}" != "${current_user}" ]]; then
- chown -R "${OZONE_VOLUME_OWNER}" "${OZONE_VOLUME}" \
- || sudo chown -R "${OZONE_VOLUME_OWNER}" "${OZONE_VOLUME}"
- fi
-fi
-
# shellcheck source=/dev/null
source "${COMPOSE_DIR}/../testlib.sh"
+mkdir -p "${OZONE_VOLUME}"/{dn1,dn2,dn3,om,recon,s3g,scm}
+fix_data_dir_permissions
+
# prepare pre-upgrade cluster
start_docker_env
execute_robot_test scm -v PREFIX:pre freon/generate.robot
diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh
index c51a96935200..3ef2dcfb9d81 100755
--- a/hadoop-ozone/dist/src/main/compose/test-all.sh
+++ b/hadoop-ozone/dist/src/main/compose/test-all.sh
@@ -23,7 +23,7 @@ SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )
ALL_RESULT_DIR="$SCRIPT_DIR/result"
PROJECT_DIR="$SCRIPT_DIR/.."
mkdir -p "$ALL_RESULT_DIR"
-rm "$ALL_RESULT_DIR/*" || true
+rm "$ALL_RESULT_DIR"/* || true
source "$SCRIPT_DIR"/testlib.sh
diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh
index 0e12fde032d7..3ba5bcd12723 100755
--- a/hadoop-ozone/dist/src/main/compose/testlib.sh
+++ b/hadoop-ozone/dist/src/main/compose/testlib.sh
@@ -16,6 +16,9 @@
# limitations under the License.
set -e
+_testlib_this="${BASH_SOURCE[0]}"
+_testlib_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
COMPOSE_ENV_NAME=$(basename "$COMPOSE_DIR")
RESULT_DIR=${RESULT_DIR:-"$COMPOSE_DIR/result"}
RESULT_DIR_INSIDE="/tmp/smoketest/$(basename "$COMPOSE_ENV_NAME")/result"
@@ -280,12 +283,14 @@ cleanup_docker_images() {
## @description Generate robot framework reports based on the saved results.
generate_report(){
+ local title="${1:-${COMPOSE_ENV_NAME}}"
+ local dir="${2:-${RESULT_DIR}}"
if command -v rebot > /dev/null 2>&1; then
#Generate the combined output and return with the right exit code (note: robot = execute test, rebot = generate output)
- rebot --reporttitle "${COMPOSE_ENV_NAME}" -N "${COMPOSE_ENV_NAME}" -d "$RESULT_DIR" "$RESULT_DIR/robot-*.xml"
+ rebot --reporttitle "${title}" -N "${title}" -d "${dir}" "${dir}/*.xml"
else
- echo "Robot framework is not installed, the reports can be generated (sudo pip install robotframework)."
+ echo "Robot framework is not installed, the reports cannot be generated (sudo pip install robotframework)."
exit 1
fi
}
@@ -298,7 +303,7 @@ copy_results() {
local result_dir="${test_dir}/result"
local test_dir_name=$(basename ${test_dir})
if [[ -n "$(find "${result_dir}" -name "*.xml")" ]]; then
- rebot --nostatusrc -N "${test_dir_name}" -o "${all_result_dir}/${test_dir_name}.xml" "${result_dir}/*.xml"
+ rebot --nostatusrc -N "${test_dir_name}" -l NONE -r NONE -o "${all_result_dir}/${test_dir_name}.xml" "${result_dir}/*.xml"
fi
cp "${result_dir}"/docker-*.log "${all_result_dir}"/
@@ -325,3 +330,63 @@ run_test_script() {
return ${ret}
}
+
+## @description Make `OZONE_VOLUME_OWNER` the owner of the `OZONE_VOLUME`
+## directory tree (required in Github Actions runner environment)
+fix_data_dir_permissions() {
+ if [[ -n "${OZONE_VOLUME}" ]] && [[ -n "${OZONE_VOLUME_OWNER}" ]]; then
+ current_user=$(whoami)
+ if [[ "${OZONE_VOLUME_OWNER}" != "${current_user}" ]]; then
+ chown -R "${OZONE_VOLUME_OWNER}" "${OZONE_VOLUME}" \
+ || sudo chown -R "${OZONE_VOLUME_OWNER}" "${OZONE_VOLUME}"
+ fi
+ fi
+}
+
+## @description Define variables required for using Ozone docker image which
+## includes binaries for a specific release
+## @param `ozone` image version
+prepare_for_binary_image() {
+ local v=$1
+
+ export OZONE_DIR=/opt/ozone
+ export OZONE_IMAGE="apache/ozone:${v}"
+}
+
+## @description Define variables required for using `ozone-runner` docker image
+## (no binaries included)
+## @param `ozone-runner` image version (optional)
+prepare_for_runner_image() {
+ local default_version=${docker.ozone-runner.version} # set at build-time from Maven property
+ local runner_version=${OZONE_RUNNER_VERSION:-${default_version}} # may be specified by user running the test
+ local v=${1:-${runner_version}} # prefer explicit argument
+
+ export OZONE_DIR=/opt/hadoop
+ export OZONE_IMAGE="apache/ozone-runner:${v}"
+}
+
+## @description Print the logical version for a specific release
+## @param the release for which logical version should be printed
+get_logical_version() {
+ local v="$1"
+
+ # shellcheck source=/dev/null
+ echo $(source "${_testlib_dir}/versions/${v}.sh" && ozone_logical_version)
+}
+
+## @description Activate the version-specific behavior for a given release
+## @param the release for which definitions should be loaded
+load_version_specifics() {
+ local v="$1"
+
+ # shellcheck source=/dev/null
+ source "${_testlib_dir}/versions/${v}.sh"
+
+ ozone_version_load
+}
+
+## @description Deactivate the previously version-specific behavior,
+## reverting to the current version's definitions
+unload_version_specifics() {
+ ozone_version_unload
+}
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh b/hadoop-ozone/dist/src/main/compose/upgrade/0.5.0/test.sh
old mode 100644
new mode 100755
similarity index 79%
rename from hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh
rename to hadoop-ozone/dist/src/main/compose/upgrade/0.5.0/test.sh
index 667ce959026a..11530c7f3d0c
--- a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-0.5.0.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/0.5.0/test.sh
@@ -15,5 +15,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-export OZONE_ADMIN_COMMAND=scmcli
-export OZONE_SAFEMODE_STATUS_COMMAND='ozone scmcli safemode status'
+_mydir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+export OZONE_UPGRADE_FROM="$(basename ${_mydir})"
+export RESULT_DIR="${_mydir}/result"
+
+cd "${_mydir}/.."
+source upgrade_to_current.sh
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh b/hadoop-ozone/dist/src/main/compose/upgrade/1.0.0/test.sh
old mode 100644
new mode 100755
similarity index 79%
rename from hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh
rename to hadoop-ozone/dist/src/main/compose/upgrade/1.0.0/test.sh
index 3ff23e0441bc..11530c7f3d0c
--- a/hadoop-ozone/dist/src/main/compose/upgrade/versions/ozone-1.0.0.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/1.0.0/test.sh
@@ -15,5 +15,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-export OZONE_ADMIN_COMMAND=admin
-export OZONE_SAFEMODE_STATUS_COMMAND='ozone admin safemode status --verbose'
+_mydir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+export OZONE_UPGRADE_FROM="$(basename ${_mydir})"
+export RESULT_DIR="${_mydir}/result"
+
+cd "${_mydir}/.."
+source upgrade_to_current.sh
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
old mode 100644
new mode 100755
index 1c16c81ab921..929be05bcc79
--- a/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/test.sh
@@ -15,59 +15,27 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-export COMPOSE_DIR
-
-: "${OZONE_REPLICATION_FACTOR:=3}"
-: "${OZONE_UPGRADE_FROM:="0.5.0"}"
-: "${OZONE_UPGRADE_TO:="1.0.0"}"
-: "${OZONE_VOLUME:="${COMPOSE_DIR}/data"}"
-
-export OZONE_VOLUME
-
-mkdir -p "${OZONE_VOLUME}"/{dn1,dn2,dn3,om,recon,s3g,scm}
-
-if [[ -n "${OZONE_VOLUME_OWNER}" ]]; then
- current_user=$(whoami)
- if [[ "${OZONE_VOLUME_OWNER}" != "${current_user}" ]]; then
- chown -R "${OZONE_VOLUME_OWNER}" "${OZONE_VOLUME}" \
- || sudo chown -R "${OZONE_VOLUME_OWNER}" "${OZONE_VOLUME}"
+SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )
+ALL_RESULT_DIR="$SCRIPT_DIR/result"
+mkdir -p "$ALL_RESULT_DIR"
+rm "$ALL_RESULT_DIR"/* || true
+source "$SCRIPT_DIR/../testlib.sh"
+
+tests=$(find_tests)
+cd "$SCRIPT_DIR"
+
+RESULT=0
+# shellcheck disable=SC2044
+for t in ${tests}; do
+ d="$(dirname "${t}")"
+
+ if ! run_test_script "${d}"; then
+ RESULT=1
fi
-fi
-
-# define version-specifics
-export OZONE_DIR=/opt/ozone
-export OZONE_IMAGE="apache/ozone:${OZONE_UPGRADE_FROM}"
-# shellcheck source=/dev/null
-source "${COMPOSE_DIR}/versions/ozone-${OZONE_UPGRADE_FROM}.sh"
-# shellcheck source=/dev/null
-source "${COMPOSE_DIR}/../testlib.sh"
-
-# prepare pre-upgrade cluster
-start_docker_env
-execute_robot_test scm -v PREFIX:pre freon/generate.robot
-execute_robot_test scm -v PREFIX:pre freon/validate.robot
-KEEP_RUNNING=false stop_docker_env
-
-# run upgrade scripts
-SCRIPT_DIR=../../libexec/upgrade
-[[ -f "${SCRIPT_DIR}/${OZONE_UPGRADE_TO}.sh" ]] && "${SCRIPT_DIR}/${OZONE_UPGRADE_TO}.sh"
-# update version-specifics
-export OZONE_DIR=/opt/hadoop
-unset OZONE_IMAGE # use apache/ozone-runner defined in docker-compose.yaml
-# shellcheck source=/dev/null
-source "${COMPOSE_DIR}/versions/ozone-${OZONE_UPGRADE_TO}.sh"
-# shellcheck source=/dev/null
-source "${COMPOSE_DIR}/../testlib.sh"
+ copy_results "${d}" "${ALL_RESULT_DIR}"
+done
-# re-start cluster with new version and check after upgrade
-export OZONE_KEEP_RESULTS=true
-start_docker_env
-execute_robot_test scm -v PREFIX:pre freon/validate.robot
-# test write key to old bucket after upgrade
-execute_robot_test scm -v PREFIX:post freon/generate.robot
-execute_robot_test scm -v PREFIX:post freon/validate.robot
-stop_docker_env
+generate_report "upgrade" "${ALL_RESULT_DIR}"
-generate_report
+exit ${RESULT}
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh
new file mode 100755
index 000000000000..1c6cfe6d2026
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/testlib.sh
@@ -0,0 +1,80 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e -o pipefail
+
+_upgrade_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+: "${OZONE_REPLICATION_FACTOR:=3}"
+: "${OZONE_VOLUME:="${COMPOSE_DIR}/data"}"
+: "${OZONE_VOLUME_OWNER:=}"
+
+source "${_upgrade_dir}/../testlib.sh"
+
+## @description Create the directory tree required for persisting data between
+## compose cluster restarts
+create_data_dir() {
+ if [[ -z "${OZONE_VOLUME}" ]]; then
+ return 1
+ fi
+
+ rm -fr "${OZONE_VOLUME}" 2> /dev/null || sudo rm -fr "${OZONE_VOLUME}"
+ mkdir -p "${OZONE_VOLUME}"/{dn1,dn2,dn3,om,recon,s3g,scm}
+ fix_data_dir_permissions
+}
+
+## @description Run upgrade steps required for going from one logical version to another.
+## @param Starting logical version
+## @param Target logical version
+execute_upgrade_steps() {
+ local -i from=$1
+ local -i to=$2
+
+ if [[ ${from} -ge ${to} ]]; then
+ return
+ fi
+
+ pushd ${_testlib_dir}/../libexec/upgrade
+
+ local v
+ for v in $(seq ${from} $((to-1))); do
+ if [[ -e "v$v.sh" ]]; then
+ source "v$v.sh"
+ fi
+ done
+
+ popd
+}
+
+## @description Pre-upgrade test steps
+first_run() {
+ start_docker_env
+ execute_robot_test scm -v PREFIX:pre freon/generate.robot
+ execute_robot_test scm -v PREFIX:pre freon/validate.robot
+ KEEP_RUNNING=false stop_docker_env
+}
+
+## @description Post-upgrade test steps
+second_run() {
+ export OZONE_KEEP_RESULTS=true
+ start_docker_env
+ execute_robot_test scm -v PREFIX:pre freon/validate.robot
+ # test write key to old bucket after upgrade
+ execute_robot_test scm -v PREFIX:post freon/generate.robot
+ execute_robot_test scm -v PREFIX:post freon/validate.robot
+ stop_docker_env
+}
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/upgrade_to_current.sh b/hadoop-ozone/dist/src/main/compose/upgrade/upgrade_to_current.sh
new file mode 100755
index 000000000000..872885196e19
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/upgrade_to_current.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script tests upgrade from a previous release to the current
+# binaries. Docker image with Ozone binaries is required for the
+# initial version, while the snapshot version uses Ozone runner image.
+
+set -e -o pipefail
+
+COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+export COMPOSE_DIR
+
+: "${OZONE_REPLICATION_FACTOR:=3}"
+: "${OZONE_UPGRADE_FROM:="0.5.0"}"
+: "${OZONE_VOLUME:="${COMPOSE_DIR}/data"}"
+
+export OZONE_REPLICATION_FACTOR OZONE_UPGRADE_FROM OZONE_VOLUME
+
+current_version=1.1.0
+
+source "${COMPOSE_DIR}/testlib.sh"
+
+create_data_dir
+
+prepare_for_binary_image "${OZONE_UPGRADE_FROM}"
+load_version_specifics "${OZONE_UPGRADE_FROM}"
+first_run
+unload_version_specifics
+
+from=$(get_logical_version "${OZONE_UPGRADE_FROM}")
+to=$(get_logical_version "${current_version}")
+execute_upgrade_steps "$from" "$to"
+
+prepare_for_runner_image
+second_run
+
+generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/upgrade_to_release.sh b/hadoop-ozone/dist/src/main/compose/upgrade/upgrade_to_release.sh
new file mode 100755
index 000000000000..5a9b4028a2e3
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/upgrade/upgrade_to_release.sh
@@ -0,0 +1,51 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script tests upgrade from one release to a later one. Docker
+# images with Ozone binaries are required for both versions.
+
+set -e -o pipefail
+
+COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+export COMPOSE_DIR
+
+: "${OZONE_REPLICATION_FACTOR:=3}"
+: "${OZONE_UPGRADE_FROM:="0.5.0"}"
+: "${OZONE_UPGRADE_TO:="1.0.0"}"
+: "${OZONE_VOLUME:="${COMPOSE_DIR}/data"}"
+
+export OZONE_REPLICATION_FACTOR OZONE_UPGRADE_FROM OZONE_UPGRADE_TO OZONE_VOLUME
+
+source "${COMPOSE_DIR}/testlib.sh"
+
+create_data_dir
+
+prepare_for_binary_image "${OZONE_UPGRADE_FROM}"
+load_version_specifics "${OZONE_UPGRADE_FROM}"
+first_run
+unload_version_specifics
+
+from=$(get_logical_version "${OZONE_UPGRADE_FROM}")
+to=$(get_logical_version "${OZONE_UPGRADE_TO}")
+execute_upgrade_steps "$from" "$to"
+
+prepare_for_binary_image "${OZONE_UPGRADE_TO}"
+load_version_specifics "${OZONE_UPGRADE_TO}"
+second_run
+unload_version_specifics
+
+generate_report
diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md b/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md
deleted file mode 100644
index 24cd113469a6..000000000000
--- a/hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-
-
-The scripts in this directory define version-specific behavior required for [`testlib.sh`](../../testlib.sh). For example the `ozone admin` command was renamed from `ozone scmcli` in 1.0.0.
diff --git a/hadoop-ozone/dist/src/main/compose/versions/0.5.0.sh b/hadoop-ozone/dist/src/main/compose/versions/0.5.0.sh
new file mode 100644
index 000000000000..f973a04b73cb
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/versions/0.5.0.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ozone_logical_version() {
+ echo 1
+}
+
+ozone_version_load() {
+ export OZONE_ADMIN_COMMAND=scmcli
+ export OZONE_SAFEMODE_STATUS_COMMAND='ozone scmcli safemode status'
+}
+
+ozone_version_unload() {
+ unset OZONE_ADMIN_COMMAND
+ unset OZONE_SAFEMODE_STATUS_COMMAND
+}
diff --git a/hadoop-ozone/dist/src/main/compose/versions/1.0.0.sh b/hadoop-ozone/dist/src/main/compose/versions/1.0.0.sh
new file mode 100644
index 000000000000..c56aa3319836
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/versions/1.0.0.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ozone_logical_version() {
+ echo 2
+}
+
+ozone_version_load() {
+ export OZONE_ADMIN_COMMAND=admin
+ export OZONE_SAFEMODE_STATUS_COMMAND='ozone admin safemode status --verbose'
+}
+
+ozone_version_unload() {
+ unset OZONE_ADMIN_COMMAND
+ unset OZONE_SAFEMODE_STATUS_COMMAND
+}
diff --git a/hadoop-ozone/dist/src/main/compose/versions/1.1.0.sh b/hadoop-ozone/dist/src/main/compose/versions/1.1.0.sh
new file mode 100644
index 000000000000..c56aa3319836
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/versions/1.1.0.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ozone_logical_version() {
+ echo 2
+}
+
+ozone_version_load() {
+ export OZONE_ADMIN_COMMAND=admin
+ export OZONE_SAFEMODE_STATUS_COMMAND='ozone admin safemode status --verbose'
+}
+
+ozone_version_unload() {
+ unset OZONE_ADMIN_COMMAND
+ unset OZONE_SAFEMODE_STATUS_COMMAND
+}
diff --git a/hadoop-ozone/dist/src/main/compose/versions/README.md b/hadoop-ozone/dist/src/main/compose/versions/README.md
new file mode 100644
index 000000000000..f6dc62cb335a
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/compose/versions/README.md
@@ -0,0 +1,23 @@
+
+
+The scripts in this directory define version-specific behavior required for [`testlib.sh`](../testlib.sh). For example the `ozone admin` command was renamed from `ozone scmcli` in 1.0.0.
+
+For each release a logical version is defined, starting from 1. It is incremented only if the new version needs to execute some scripts to handle an (unintended) incompatibility. For example, 1.0.0 or later needs to rename some RocksDB column-families when upgrading from 0.5.0.
+
+Interface:
+
+ * `ozone_logical_version`: print the logical version
+ * `ozone_version_load`: define version-specific variables for the test library
+ * `ozone_version_unload`: unset version-specific variables; this reverts test library behavior to the "current" one.
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
index 3fe8e03838ce..95951f4901d6 100644
--- a/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/clients.yaml
@@ -17,15 +17,6 @@
version: "3.4"
services:
- old_client_0_5_0:
- image: apache/ozone:0.5.0
- env_file:
- - docker-config
- volumes:
- - ../..:/opt/ozone
- environment:
- HADOOP_OPTS:
- command: ["sleep","1000000"]
old_client_1_0_0:
image: apache/ozone:1.0.0
env_file:
diff --git a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
index 53b3308c10c2..7d7ee1da4f1d 100755
--- a/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
+++ b/hadoop-ozone/dist/src/main/compose/xcompat/test.sh
@@ -21,7 +21,7 @@ basename=$(basename ${COMPOSE_DIR})
current_version=1.1.0
-# shellcheck source=/dev/null
+# shellcheck source=hadoop-ozone/dist/src/main/compose/testlib.sh
source "${COMPOSE_DIR}/../testlib.sh"
old_client() {
@@ -80,14 +80,13 @@ create_results_dir
# current cluster with various clients
COMPOSE_FILE=new-cluster.yaml:clients.yaml cluster_version=${current_version} test_cross_compatibility
-for cluster_version in 1.0.0 0.5.0; do
- # shellcheck source=/dev/null
- source "${COMPOSE_DIR}/../upgrade/versions/ozone-${cluster_version}.sh"
- # shellcheck source=/dev/null
- source "${COMPOSE_DIR}/../testlib.sh"
+for cluster_version in 1.0.0; do
+ load_version_specifics ${cluster_version}
export OZONE_VERSION=${cluster_version}
COMPOSE_FILE=old-cluster.yaml:clients.yaml test_cross_compatibility
+
+ unload_version_specifics
done
generate_report
diff --git a/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh b/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh
index 8184da401710..95df27f53f8e 100755
--- a/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh
+++ b/hadoop-ozone/dist/src/main/dockerlibexec/entrypoint.sh
@@ -102,7 +102,7 @@ CONF_DESTINATION_DIR="${HADOOP_CONF_DIR:-/opt/hadoop/etc/hadoop}"
#Try to copy the defaults
set +e
if [[ -d "/opt/ozone/etc/hadoop" ]]; then
- cp /opt/hadoop/etc/hadoop/* "$CONF_DESTINATION_DIR/" > /dev/null 2>&1
+ cp /opt/ozone/etc/hadoop/* "$CONF_DESTINATION_DIR/" > /dev/null 2>&1
elif [[ -d "/opt/hadoop/etc/hadoop" ]]; then
cp /opt/hadoop/etc/hadoop/* "$CONF_DESTINATION_DIR/" > /dev/null 2>&1
fi
diff --git a/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh b/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh
index ae810c9f679c..5d866514be9f 100755
--- a/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh
+++ b/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh
@@ -24,7 +24,7 @@ SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )
set -ex
ALL_RESULT_DIR="$SCRIPT_DIR/result"
-rm "$ALL_RESULT_DIR/*" || true
+rm "$ALL_RESULT_DIR"/* || true
mkdir -p "$ALL_RESULT_DIR"
RESULT=0
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot
index cef294f1e8d7..449d82e78a74 100644
--- a/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot
@@ -30,16 +30,16 @@ Check replicationmanager with explicit host
Should contain ${output} ReplicationManager
Should contain ${output} Running
-Start replicationmanager
- ${output} = Execute ozone admin replicationmanager start
- Should contain ${output} Starting ReplicationManager
- Wait Until Keyword Succeeds 30sec 5sec Execute ozone admin replicationmanager status | grep -q 'is Running'
-
Stop replicationmanager
${output} = Execute ozone admin replicationmanager stop
Should contain ${output} Stopping ReplicationManager
Wait Until Keyword Succeeds 30sec 5sec Execute ozone admin replicationmanager status | grep -q 'is Not Running'
+Start replicationmanager
+ ${output} = Execute ozone admin replicationmanager start
+ Should contain ${output} Starting ReplicationManager
+ Wait Until Keyword Succeeds 30sec 5sec Execute ozone admin replicationmanager status | grep -q 'is Running'
+
Incomplete command
${output} = Execute And Ignore Error ozone admin replicationmanager
Should contain ${output} Incomplete command
diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot
index be3ea2e11eff..fc133c104509 100644
--- a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot
@@ -19,8 +19,15 @@ Resource ../ozone-lib/shell.robot
Test Timeout 5 minutes
*** Variables ***
-${KEY_NAME_SUFFIX} ${EMPTY}
+${SUFFIX} ${EMPTY}
*** Test Cases ***
Key Can Be Read
- Key Should Match Local File /vol1/bucket1/key-${KEY_NAME_SUFFIX} /etc/passwd
+ Key Should Match Local File /vol1/bucket1/key-${SUFFIX} /etc/passwd
+
+Dir Can Be Listed
+ Execute ozone fs -ls o3fs://bucket1.vol1/dir-${SUFFIX}
+
+File Can Be Get
+ Execute ozone fs -get o3fs://bucket1.vol1/dir-${SUFFIX}/passwd /tmp/passwd-${SUFFIX}
+ [teardown] Execute rm /tmp/passwd-${SUFFIX}
diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot
index f34aede4c79f..f5c920125714 100644
--- a/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/write.robot
@@ -19,8 +19,14 @@ Resource ../ozone-lib/shell.robot
Test Timeout 5 minutes
*** Variables ***
-${KEY_NAME_SUFFIX} ${EMPTY}
+${SUFFIX} ${EMPTY}
*** Test Cases ***
Key Can Be Written
- Create Key /vol1/bucket1/key-${KEY_NAME_SUFFIX} /etc/passwd
+ Create Key /vol1/bucket1/key-${SUFFIX} /etc/passwd
+
+Dir Can Be Created
+ Execute ozone fs -mkdir o3fs://bucket1.vol1/dir-${SUFFIX}
+
+File Can Be Put
+ Execute ozone fs -put /etc/passwd o3fs://bucket1.vol1/dir-${SUFFIX}/
diff --git a/hadoop-ozone/dist/src/main/smoketest/csi.robot b/hadoop-ozone/dist/src/main/smoketest/csi.robot
index 44ff448fa69b..cd64b67ef5ac 100644
--- a/hadoop-ozone/dist/src/main/smoketest/csi.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/csi.robot
@@ -29,6 +29,6 @@ CSI Socket check
Check if CSI server is started
Wait Until Keyword Succeeds 3min 1sec CSI Socket check
-Test CSI identitiy service
+Test CSI identity service
${result} = Execute csc -e unix:///tmp/csi.sock identity plugin-info
Should Contain ${result} org.apache.hadoop.ozone
diff --git a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot
index 1cae40210e9d..524a87d8d11a 100644
--- a/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/recon/recon-api.robot
@@ -29,7 +29,7 @@ ${API_ENDPOINT_URL} http://recon:9888/api/v1
Check if Recon picks up container from OM
Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user
${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/containers
- Should contain ${result} \"ContainerID\":1
+ Should contain ${result} \"ContainerID\"
${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/utilization/fileCount
Should contain ${result} \"fileSize\":2048,\"count\":10
@@ -58,9 +58,9 @@ Check if Recon picks up DN heartbeats
Should contain ${result} datanode_3
${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/clusterState
- Should contain ${result} \"totalDatanodes\":3
- Should contain ${result} \"healthyDatanodes\":3
- Should contain ${result} \"pipelines\":4
+ Should contain ${result} \"totalDatanodes\"
+ Should contain ${result} \"healthyDatanodes\"
+ Should contain ${result} \"pipelines\"
${result} = Execute curl --negotiate -u : -LSs ${API_ENDPOINT_URL}/containers/1/replicaHistory
Should contain ${result} \"containerId\":1
diff --git a/hadoop-ozone/dist/src/main/smoketest/replication/wait.robot b/hadoop-ozone/dist/src/main/smoketest/replication/wait.robot
new file mode 100644
index 000000000000..cf2d621f96e0
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/replication/wait.robot
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation Wait for replication to succeed
+Library BuiltIn
+Resource ../commonlib.robot
+Test Timeout 5 minutes
+
+*** Variables ***
+${container} 1
+${count} 3
+
+*** Keywords ***
+Check Container Replicated
+ ${output} = Execute ozone admin container info --json "${container}" | jq '.pipeline.nodes | length'
+ Should Be Equal ${output} ${count}
+
+*** Test Cases ***
+Wait Until Container Replicated
+ Wait Until Keyword Succeeds 5min 10sec Check Container Replicated
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/boto3.robot b/hadoop-ozone/dist/src/main/smoketest/s3/boto3.robot
new file mode 100644
index 000000000000..6a575f39efbd
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/boto3.robot
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation S3 gateway test with Boto3 Client
+Library OperatingSystem
+Library String
+Library Process
+Library BuiltIn
+Resource ../commonlib.robot
+Resource ./commonawslib.robot
+Test Timeout 15 minutes
+Suite Setup Setup s3 tests
+
+*** Variables ***
+${ENDPOINT_URL} http://s3g:9878
+${BUCKET} generated
+
+*** Test Cases ***
+
+Boto3 Client Test
+ ${result} = Execute python3 /opt/hadoop/smoketest/s3/boto_client.py ${ENDPOINT_URL} ${BUCKET}
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/boto_client.py b/hadoop-ozone/dist/src/main/smoketest/s3/boto_client.py
new file mode 100755
index 000000000000..5185271cded6
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/boto_client.py
@@ -0,0 +1,264 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import random
+import string
+import logging
+import json
+import unittest
+import boto3
+from botocore.client import Config
+from botocore.exceptions import ClientError
+import os.path
+from os import path
+
+class TestBotoClient(unittest.TestCase):
+
+ s3 = None
+ s3_client = None
+ setup_done = False
+ target_bucket = None
+ ozone_endpoint_url = None
+
+ def setUp(self):
+ if TestBotoClient.setup_done:
+ return
+
+ TestBotoClient.ozone_endpoint_url = sys.argv[1]
+ TestBotoClient.target_bucket = sys.argv[2]
+ TestBotoClient.setup_done = True
+
+ TestBotoClient.s3 = boto3.resource('s3',
+ endpoint_url=self.ozone_endpoint_url
+ )
+
+ TestBotoClient.s3_client = boto3.session.Session().client(
+ service_name='s3',
+ endpoint_url=self.ozone_endpoint_url
+ )
+
+ try:
+ response = self.s3_client.create_bucket(Bucket='boto-bucket999')
+ print(response)
+
+ response = self.s3_client.upload_file("README.md", str(self.target_bucket), "README3.md")
+ print(response)
+
+ response = self.s3.Bucket(str(self.target_bucket)).upload_file('README.md','README4.md')
+ print(response)
+
+ self.s3.Bucket(str(self.target_bucket)).upload_file('README.md','README10.md')
+ print(response)
+ except ClientError as e:
+ logging.error(e)
+ print(e)
+
+ f = open('multiUpload.gz',"wb")
+ f.seek(10485760)
+ f.write(b"\0")
+ f.close()
+ self.s3.Bucket(str(self.target_bucket)).upload_file('./multiUpload.gz','multiUpload.1.gz')
+ self.s3.Bucket(str(self.target_bucket)).upload_file('./multiUpload.gz','multiUpload.2.gz')
+
+ def test_create_bucket(self):
+ self.assertTrue(self.s3_client is not None)
+ try:
+ letters = string.ascii_lowercase
+ bucket_name = ''.join(random.choice(letters) for i in range(10))
+ response = self.s3_client.create_bucket(Bucket='bucket-' + str(bucket_name))
+ print(response)
+ self.assertTrue(str(bucket_name) in response.get('Location'))
+ self.assertTrue(response.get('ResponseMetadata').get('HTTPStatusCode') == 200)
+ except ClientError as e:
+ print(e)
+ logging.error(e)
+ return False
+ return True
+
+ def test_list_bucket(self):
+ self.assertTrue(self.s3_client is not None)
+ try:
+ response = self.s3_client.list_buckets()
+ self.assertTrue(response.get('ResponseMetadata').get('HTTPStatusCode') == 200)
+ print(response)
+ except ClientError as e:
+ print(e)
+ logging.error(e)
+ return False
+ return True
+
+ def test_head_bucket(self):
+ self.assertTrue(self.s3_client is not None)
+ try:
+ response = self.s3_client.head_bucket(Bucket=self.target_bucket)
+ self.assertTrue(response.get('ResponseMetadata').get('HTTPStatusCode') == 200)
+ print(response)
+ except ClientError as e:
+ print(e)
+ logging.error(e)
+ return False
+ return True
+
+ def test_bucket_delete(self):
+ self.assertTrue(self.s3_client is not None)
+ try:
+ response = self.s3_client.delete_bucket(Bucket='boto-bucket999')
+ self.assertTrue(response.get('ResponseMetadata').get('HTTPStatusCode') == 204)
+ print(response)
+ except ClientError as e:
+ logging.error(e)
+ return False
+ return True
+
+ def test_upload_file(self):
+ self.assertTrue(self.s3 is not None)
+ try:
+ self.s3.Bucket(str(self.target_bucket)).upload_file('./README.md','README1.md')
+ response = self.s3_client.head_object(Bucket=str(self.target_bucket), Key='README1.md')
+ self.assertTrue(response.get('ResponseMetadata').get('HTTPStatusCode') == 200)
+ print(response)
+ except ClientError as e:
+ print(e)
+ logging.error(e)
+ return False
+ return True
+
+ def test_download_file(self):
+ self.assertTrue(self.s3 is not None)
+ try:
+ self.s3.Bucket(str(self.target_bucket)).download_file('README10.md', 'download.md')
+ self.assertTrue(path.exists("./download.md"))
+ except ClientError as e:
+ print(e)
+ logging.error(e)
+ return False
+ return True
+
+ def test_delete_objects(self):
+ self.assertTrue(self.s3_client is not None)
+ try:
+ response = self.s3_client.delete_objects(
+ Bucket=str(self.target_bucket),
+ Delete={
+ 'Objects': [
+ {
+ 'Key': 'README4.md',
+ },
+ {
+ 'Key': 'README3.md',
+ },
+ ],
+ 'Quiet': False,
+ },
+ )
+ self.assertTrue(response.get('ResponseMetadata').get('HTTPStatusCode') == 200)
+ print(response)
+ except ClientError as e:
+ print(e)
+ logging.error(e)
+ return False
+ return True
+
+ def test_head_object(self):
+ self.assertTrue(self.s3_client is not None)
+ try:
+ response = self.s3_client.head_object(Bucket=str(self.target_bucket), Key='README10.md')
+ self.assertTrue(response.get('ResponseMetadata').get('HTTPStatusCode') == 200)
+ print(response)
+ except ClientError as e:
+ print(e)
+ logging.error(e)
+ return False
+ return True
+
+ def test_multi_uploads(self):
+ self.assertTrue(self.s3_client is not None)
+ try:
+ lts = string.ascii_lowercase
+ key_name = ''.join(random.choice(lts) for i in range(10))
+ response = self.s3_client.create_multipart_upload(Bucket=str(self.target_bucket), Key=str(key_name))
+ print(response)
+ uid=response['UploadId']
+
+ copy1 = self.target_bucket + "/multiUpload.1.gz"
+ response = self.s3_client.upload_part_copy(
+ Bucket=str(self.target_bucket),
+ CopySource=str(copy1),
+ Key=str(key_name),
+ PartNumber=1,
+ UploadId=str(uid)
+ )
+ etag1=response.get('CopyPartResult').get('ETag')
+
+ copy2 = self.target_bucket + "/multiUpload.2.gz"
+ response = self.s3_client.upload_part_copy(
+ Bucket=str(self.target_bucket),
+ CopySource=str(copy2),
+ Key=str(key_name),
+ PartNumber=2,
+ UploadId=str(uid)
+ )
+ etag2=response.get('CopyPartResult').get('ETag')
+
+ response = self.s3_client.complete_multipart_upload(
+ Bucket=str(self.target_bucket),
+ Key=str(key_name),
+ MultipartUpload={
+ 'Parts': [
+ {
+ 'ETag': str(etag1),
+ 'PartNumber': 1,
+ },
+ {
+ 'ETag': str(etag2),
+ 'PartNumber': 2,
+ },
+ ],
+ },
+ UploadId=str(uid),
+ )
+ self.assertTrue(response.get('ResponseMetadata').get('HTTPStatusCode') == 200)
+ print(response)
+ except ClientError as e:
+ print(e)
+ logging.error(e)
+ return False
+ return True
+
+if __name__ == '__main__':
+ #unittest.main()
+ suite = unittest.TestSuite()
+
+ suite.addTest(TestBotoClient('test_create_bucket'))
+ suite.addTest(TestBotoClient('test_list_bucket'))
+ suite.addTest(TestBotoClient('test_head_bucket'))
+ suite.addTest(TestBotoClient('test_bucket_delete'))
+ suite.addTest(TestBotoClient('test_upload_file'))
+ suite.addTest(TestBotoClient('test_download_file'))
+ suite.addTest(TestBotoClient('test_delete_objects'))
+ suite.addTest(TestBotoClient('test_head_object'))
+ suite.addTest(TestBotoClient('test_multi_uploads'))
+
+ result = unittest.TextTestRunner(verbosity=2).run(suite)
+
+ if result.wasSuccessful():
+ print("Boto3 Client Test PASSED!")
+ exit(0)
+ else:
+ print("Boto3 Client Test FAILED!")
+ exit(1)
diff --git a/hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh b/hadoop-ozone/dist/src/shell/upgrade/v1.sh
similarity index 85%
rename from hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh
rename to hadoop-ozone/dist/src/shell/upgrade/v1.sh
index 65739787ee67..1442ffdf799e 100755
--- a/hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh
+++ b/hadoop-ozone/dist/src/shell/upgrade/v1.sh
@@ -20,4 +20,4 @@ SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
: "${SCM_DIR:="${OZONE_VOLUME}/scm"}"
: "${OZONE_RUNNER_VERSION:="20200625-1"}"
-docker run --rm -v "${SCM_DIR}":/scm -v "${SCRIPT_DIR}/1.0.0":/upgrade -w /scm/metadata apache/ozone-runner:"${OZONE_RUNNER_VERSION}" /upgrade/01-migrate-scm-db.sh
+docker run --rm -v "${SCM_DIR}":/scm -v "${SCRIPT_DIR}/v1":/upgrade -w /scm/metadata apache/ozone-runner:"${OZONE_RUNNER_VERSION}" /upgrade/01-migrate-scm-db.sh
diff --git a/hadoop-ozone/dist/src/shell/upgrade/1.0.0/01-migrate-scm-db.sh b/hadoop-ozone/dist/src/shell/upgrade/v1/01-migrate-scm-db.sh
similarity index 100%
rename from hadoop-ozone/dist/src/shell/upgrade/1.0.0/01-migrate-scm-db.sh
rename to hadoop-ozone/dist/src/shell/upgrade/v1/01-migrate-scm-db.sh
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
new file mode 100644
index 000000000000..7c3a8a9f6ba7
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMDbCheckpointServlet.java
@@ -0,0 +1,182 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm;
+
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.ServletOutputStream;
+import javax.servlet.WriteListener;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
+import org.apache.hadoop.hdds.scm.server.SCMDBCheckpointServlet;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+
+import org.apache.commons.io.FileUtils;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.junit.rules.Timeout;
+import org.mockito.Matchers;
+import static org.mockito.Mockito.doCallRealMethod;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+/**
+ * Class used for testing the OM DB Checkpoint provider servlet.
+ */
+public class TestSCMDbCheckpointServlet {
+ private MiniOzoneCluster cluster = null;
+ private StorageContainerManager scm;
+ private SCMMetrics scmMetrics;
+ private OzoneConfiguration conf;
+ private String clusterId;
+ private String scmId;
+ private String omId;
+
+ @Rule
+ public Timeout timeout = new Timeout(240000);
+
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder();
+ /**
+ * Create a MiniDFSCluster for testing.
+ *
- * Ozone is made active by setting OZONE_ENABLED = true
- *
- * @throws IOException
- */
@BeforeClass
public static void init() throws Exception {
testDir = GenericTestUtils.getTestDir(
@@ -115,6 +126,9 @@ public static void init() throws Exception {
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(10)
.setScmId(SCM_ID)
+ .setBlockSize(BLOCK_SIZE)
+ .setChunkSize(CHUNK_SIZE)
+ .setStreamBufferSizeUnit(StorageUnit.BYTES)
.setCertificateClient(certificateClientTest)
.build();
cluster.getOzoneManager().startSecretManager();
@@ -124,6 +138,7 @@ public static void init() throws Exception {
storageContainerLocationClient =
cluster.getStorageContainerLocationClient();
ozoneManager = cluster.getOzoneManager();
+ ozoneManager.setMinMultipartUploadPartSize(MPU_PART_MIN_SIZE);
TestOzoneRpcClient.setCluster(cluster);
TestOzoneRpcClient.setOzClient(ozClient);
TestOzoneRpcClient.setOzoneManager(ozoneManager);
@@ -136,11 +151,9 @@ public static void init() throws Exception {
createKey(TEST_KEY, cluster.getOzoneManager().getKmsProvider(), conf);
}
-
-
- /**
- * Close OzoneClient and shutdown MiniOzoneCluster.
- */
+ /**
+ * Close OzoneClient and shutdown MiniOzoneCluster.
+ */
@AfterClass
public static void shutdown() throws IOException {
if(ozClient != null) {
@@ -271,6 +284,14 @@ public void testKeyWithEncryptionAndGdpr() throws Exception {
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
String objectKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
keyName);
+
+ GenericTestUtils.waitFor(() -> {
+ try {
+ return omMetadataManager.getDeletedTable().isExist(objectKey);
+ } catch (IOException e) {
+ return false;
+ }
+ }, 500, 100000);
RepeatedOmKeyInfo deletedKeys =
omMetadataManager.getDeletedTable().get(objectKey);
Map deletedKeyMetadata =
@@ -323,4 +344,151 @@ private static void createKey(String keyName, KeyProvider
provider.createKey(keyName, options);
provider.flush();
}
+
+ @Test
+ public void testMPUwithOnePart() throws Exception {
+ testMultipartUploadWithEncryption(1);
+ }
+
+ @Test
+ public void testMPUwithTwoParts() throws Exception {
+ testMultipartUploadWithEncryption(2);
+ }
+
+ public void testMultipartUploadWithEncryption(int numParts) throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = "mpu_test_key_" + numParts;
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ BucketArgs bucketArgs = BucketArgs.newBuilder()
+ .setBucketEncryptionKey(TEST_KEY).build();
+ volume.createBucket(bucketName, bucketArgs);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+
+ // Initiate multipart upload
+ String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ ONE);
+
+ // Upload Parts
+ Map partsMap = new TreeMap<>();
+ List partsData = new ArrayList<>();
+ int keySize = 0;
+ for (int i = 1; i <= numParts; i++) {
+ // Generate random data with different sizes for each part.
+ // Adding a random int with a cap at 8K (the default crypto buffer
+ // size) to get parts whose last byte does not coincide with crypto
+ // buffer boundary.
+ byte[] data = generateRandomData((MPU_PART_MIN_SIZE * i) +
+ RANDOM.nextInt(DEFAULT_CRYPTO_BUFFER_SIZE));
+ String partName = uploadPart(bucket, keyName, uploadID, i, data);
+ partsMap.put(i, partName);
+ partsData.add(data);
+ keySize += data.length;
+ }
+
+ // Combine the parts data into 1 byte array for verification
+ byte[] inputData = new byte[keySize];
+ int dataCopied = 0;
+ for (int i = 1; i <= numParts; i++) {
+ byte[] partBytes = partsData.get(i - 1);
+ System.arraycopy(partBytes, 0, inputData, dataCopied, partBytes.length);
+ dataCopied += partBytes.length;
+ }
+
+ // Complete MPU
+ completeMultipartUpload(bucket, keyName, uploadID, partsMap);
+
+ // Read different data lengths and starting from different offsets and
+ // verify the data matches.
+ Random random = new Random();
+ int randomSize = random.nextInt(keySize/2);
+ int randomOffset = random.nextInt(keySize - randomSize);
+
+ int[] readDataSizes = {keySize, keySize / 3 + 1, BLOCK_SIZE,
+ BLOCK_SIZE * 2 + 1, CHUNK_SIZE, CHUNK_SIZE / 4 - 1,
+ DEFAULT_CRYPTO_BUFFER_SIZE, DEFAULT_CRYPTO_BUFFER_SIZE / 2, 1,
+ randomSize};
+
+ int[] readFromPositions = {0, DEFAULT_CRYPTO_BUFFER_SIZE + 10, CHUNK_SIZE,
+ BLOCK_SIZE - DEFAULT_CRYPTO_BUFFER_SIZE + 1, BLOCK_SIZE, keySize / 3,
+ keySize - 1, randomOffset};
+
+ // Create an input stream to read the data
+ OzoneInputStream inputStream = bucket.readKey(keyName);
+ Assert.assertTrue(inputStream instanceof MultipartCryptoKeyInputStream);
+ MultipartCryptoKeyInputStream cryptoInputStream =
+ (MultipartCryptoKeyInputStream) inputStream;
+
+ for (int readDataLen : readDataSizes) {
+ for (int readFromPosition : readFromPositions) {
+ // Check that offset + buffer size does not exceed the key size
+ if (readFromPosition + readDataLen > keySize) {
+ continue;
+ }
+
+ byte[] readData = new byte[readDataLen];
+ cryptoInputStream.seek(readFromPosition);
+ inputStream.read(readData, 0, readDataLen);
+
+ assertReadContent(inputData, readData, readFromPosition);
+ }
+ }
+ }
+
+ private static byte[] generateRandomData(int length) {
+ byte[] bytes = new byte[length];
+ RANDOM.nextBytes(bytes);
+ return bytes;
+ }
+
+ private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
+ ReplicationType replicationType, ReplicationFactor replicationFactor)
+ throws Exception {
+ OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+ replicationType, replicationFactor);
+
+ String uploadID = multipartInfo.getUploadID();
+ Assert.assertNotNull(uploadID);
+ return uploadID;
+ }
+
+ private String uploadPart(OzoneBucket bucket, String keyName,
+ String uploadID, int partNumber, byte[] data) throws Exception {
+ OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName,
+ data.length, partNumber, uploadID);
+ ozoneOutputStream.write(data, 0, data.length);
+ ozoneOutputStream.close();
+
+ OmMultipartCommitUploadPartInfo omMultipartCommitUploadPartInfo =
+ ozoneOutputStream.getCommitUploadPartInfo();
+
+ Assert.assertNotNull(omMultipartCommitUploadPartInfo);
+ Assert.assertNotNull(omMultipartCommitUploadPartInfo.getPartName());
+ return omMultipartCommitUploadPartInfo.getPartName();
+ }
+
+ private void completeMultipartUpload(OzoneBucket bucket, String keyName,
+ String uploadID, Map partsMap) throws Exception {
+ OmMultipartUploadCompleteInfo omMultipartUploadCompleteInfo = bucket
+ .completeMultipartUpload(keyName, uploadID, partsMap);
+
+ Assert.assertNotNull(omMultipartUploadCompleteInfo);
+ Assert.assertEquals(omMultipartUploadCompleteInfo.getBucket(), bucket
+ .getName());
+ Assert.assertEquals(omMultipartUploadCompleteInfo.getVolume(), bucket
+ .getVolumeName());
+ Assert.assertEquals(omMultipartUploadCompleteInfo.getKey(), keyName);
+ Assert.assertNotNull(omMultipartUploadCompleteInfo.getHash());
+ }
+
+ private static void assertReadContent(byte[] inputData, byte[] readData,
+ int offset) {
+ byte[] inputDataForComparison = Arrays.copyOfRange(inputData, offset,
+ offset + readData.length);
+ Assert.assertArrayEquals("Read data does not match input data at offset " +
+ offset + " and length " + readData.length,
+ inputDataForComparison, readData);
+ }
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 4508ade10150..cd32f4f1fc56 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -89,6 +89,7 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
@@ -3069,7 +3070,6 @@ private byte[] generateData(int size, byte val) {
return chars;
}
-
private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val)
throws Exception {
// Initiate Multipart upload request
@@ -3098,11 +3098,9 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val)
partsMap.put(3, partName);
length += part3.getBytes(UTF_8).length;
-
// Complete multipart upload request
completeMultipartUpload(bucket, keyName, uploadID, partsMap);
-
//Now Read the key which has been completed multipart upload.
byte[] fileContent = new byte[data.length + data.length + part3.getBytes(
UTF_8).length];
@@ -3122,8 +3120,19 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val)
sb.append(part2);
sb.append(part3);
Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8));
- }
+ String ozoneKey = ozoneManager.getMetadataManager()
+ .getOzoneKey(bucket.getVolumeName(), bucket.getName(), keyName);
+ OmKeyInfo omKeyInfo = ozoneManager.getMetadataManager().getKeyTable()
+ .get(ozoneKey);
+
+ OmKeyLocationInfoGroup latestVersionLocations =
+ omKeyInfo.getLatestVersionLocations();
+ Assert.assertEquals(true, latestVersionLocations.isMultipartKey());
+ latestVersionLocations.getBlocksLatestVersionOnly()
+ .forEach(omKeyLocationInfo ->
+ Assert.assertTrue(omKeyLocationInfo.getPartNumber() != -1));
+ }
private String initiateMultipartUpload(OzoneBucket bucket, String keyName,
ReplicationType replicationType, ReplicationFactor replicationFactor)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
index 2a52c96eaf7e..3a90ae191d04 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
@@ -39,6 +39,7 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.commons.io.FileUtils;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
@@ -46,13 +47,11 @@
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
-import static org.apache.hadoop.ozone.om.OMDBCheckpointServlet.writeOmDBCheckpointToStream;
-
-import org.apache.hadoop.security.UserGroupInformation;
-import org.junit.After;
+import static org.apache.hadoop.ozone.om.OMDBCheckpointServlet.writeDBCheckpointToStream;
+import org.junit.AfterClass;
import org.junit.Assert;
import static org.junit.Assert.assertNotNull;
-import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
@@ -67,15 +66,15 @@
* Class used for testing the OM DB Checkpoint provider servlet.
*/
public class TestOMDbCheckpointServlet {
- private MiniOzoneCluster cluster = null;
- private OMMetrics omMetrics;
- private OzoneConfiguration conf;
- private String clusterId;
- private String scmId;
- private String omId;
+ private static MiniOzoneCluster cluster = null;
+ private static OMMetrics omMetrics;
+ private static OzoneConfiguration conf;
+ private static String clusterId;
+ private static String scmId;
+ private static String omId;
@Rule
- public Timeout timeout = new Timeout(60000);
+ public Timeout timeout = new Timeout(240000);
@Rule
public TemporaryFolder folder = new TemporaryFolder();
@@ -86,8 +85,8 @@ public class TestOMDbCheckpointServlet {
*
* @throws IOException
*/
- @Before
- public void init() throws Exception {
+ @BeforeClass
+ public static void init() throws Exception {
conf = new OzoneConfiguration();
clusterId = UUID.randomUUID().toString();
scmId = UUID.randomUUID().toString();
@@ -99,6 +98,7 @@ public void init() throws Exception {
.setClusterId(clusterId)
.setScmId(scmId)
.setOmId(omId)
+ .setNumDatanodes(1)
.build();
cluster.waitForClusterToBeReady();
omMetrics = cluster.getOzoneManager().getMetrics();
@@ -107,8 +107,8 @@ public void init() throws Exception {
/**
* Shutdown MiniDFSCluster.
*/
- @After
- public void shutdown() {
+ @AfterClass
+ public static void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
@@ -116,13 +116,19 @@ public void shutdown() {
@Test
public void testDoGet() throws ServletException, IOException {
-
File tempFile = null;
try {
OMDBCheckpointServlet omDbCheckpointServletMock =
mock(OMDBCheckpointServlet.class);
+ final OzoneManager om = cluster.getOzoneManager();
+
doCallRealMethod().when(omDbCheckpointServletMock).init();
+ doCallRealMethod().when(omDbCheckpointServletMock).initialize(
+ om.getMetadataManager().getStore(),
+ om.getMetrics().getDBCheckpointMetrics(),
+ om.getAclsEnabled(),
+ om.getOzoneAdmins(om.getConfiguration()));
HttpServletRequest requestMock = mock(HttpServletRequest.class);
// Return current user short name when asked
@@ -167,16 +173,20 @@ public void write(int b) throws IOException {
responseMock);
omDbCheckpointServletMock.init();
- long initialCheckpointCount = omMetrics.getNumCheckpoints();
+ long initialCheckpointCount =
+ omMetrics.getDBCheckpointMetrics().getNumCheckpoints();
omDbCheckpointServletMock.doGet(requestMock, responseMock);
Assert.assertTrue(tempFile.length() > 0);
Assert.assertTrue(
- omMetrics.getLastCheckpointCreationTimeTaken() > 0);
+ omMetrics.getDBCheckpointMetrics().
+ getLastCheckpointCreationTimeTaken() > 0);
Assert.assertTrue(
- omMetrics.getLastCheckpointStreamingTimeTaken() > 0);
- Assert.assertTrue(omMetrics.getNumCheckpoints() > initialCheckpointCount);
+ omMetrics.getDBCheckpointMetrics().
+ getLastCheckpointStreamingTimeTaken() > 0);
+ Assert.assertTrue(omMetrics.getDBCheckpointMetrics().
+ getNumCheckpoints() > initialCheckpointCount);
} finally {
FileUtils.deleteQuietly(tempFile);
}
@@ -207,7 +217,7 @@ public void testWriteCheckpointToOutputStream() throws Exception {
new File(Paths.get(testDirName, "output_file.tgz").toString());
TestDBCheckpoint dbCheckpoint = new TestDBCheckpoint(
Paths.get(testDirName));
- writeOmDBCheckpointToStream(dbCheckpoint,
+ writeDBCheckpointToStream(dbCheckpoint,
new FileOutputStream(outputFile));
assertNotNull(outputFile);
} finally {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
index eb21a81c27d8..45f63bf3716d 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
@@ -141,6 +141,7 @@ public void testDatanodeRegistrationAndReports() throws Exception {
GenericTestUtils.LogCapturer logCapturer =
GenericTestUtils.LogCapturer.captureLogs(ReconNodeManager.LOG);
+ GenericTestUtils.setLogLevel(ReconNodeManager.LOG, Level.DEBUG);
reconScm.getEventQueue().fireEvent(CLOSE_CONTAINER,
containerInfo.containerID());
GenericTestUtils.waitFor(() -> logCapturer.getOutput()
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 1672796fa0a2..017bf82a9a99 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -769,12 +769,15 @@ message KeyLocation {
// get the up to date pipeline information. This will need o3fs
// provide not only a OM delegation token but also a SCM delegation token
optional hadoop.hdds.Pipeline pipeline = 7;
+
+ optional int32 partNumber = 9 [default = -1];
}
message KeyLocationList {
optional uint64 version = 1;
repeated KeyLocation keyLocations = 2;
optional FileEncryptionInfoProto fileEncryptionInfo = 3;
+ optional bool isMultipartKey = 4 [default = false];
}
message KeyInfo {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 7cc61ecfad36..4a968fd5910f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -621,7 +621,7 @@ public void commitKey(OmKeyArgs args, long clientID) throws IOException {
keyInfo.setModificationTime(Time.now());
//update the block length for each block
- keyInfo.updateLocationInfoList(locationInfoList);
+ keyInfo.updateLocationInfoList(locationInfoList, false);
metadataManager.getStore().move(
openKey,
objectKey,
@@ -1116,7 +1116,7 @@ public OmMultipartCommitUploadPartInfo commitMultipartUploadPart(
// set the data size and location info list
keyInfo.setDataSize(omKeyArgs.getDataSize());
- keyInfo.updateLocationInfoList(omKeyArgs.getLocationInfoList());
+ keyInfo.updateLocationInfoList(omKeyArgs.getLocationInfoList(), true);
partName = metadataManager.getOzoneKey(volumeName, bucketName, keyName)
+ clientID;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java
index dcb539272659..39868b44a013 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java
@@ -19,42 +19,15 @@
package org.apache.hadoop.ozone.om;
import javax.servlet.ServletException;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletResponse;
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.Collection;
-import java.util.stream.Collectors;
-import java.util.stream.Stream;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.db.DBCheckpoint;
-import org.apache.hadoop.hdds.utils.db.DBStore;
-import org.apache.hadoop.hdfs.util.DataTransferThrottler;
+import org.apache.hadoop.hdds.utils.DBCheckpointServlet;
import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.commons.compress.archivers.ArchiveEntry;
-import org.apache.commons.compress.archivers.ArchiveOutputStream;
-import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream;
-import org.apache.commons.compress.compressors.CompressorException;
-import org.apache.commons.compress.compressors.CompressorOutputStream;
-import org.apache.commons.compress.compressors.CompressorStreamFactory;
-import org.apache.commons.compress.utils.IOUtils;
-import org.apache.commons.lang3.StringUtils;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
-import static org.apache.hadoop.ozone.OzoneConsts.OZONE_DB_CHECKPOINT_REQUEST_FLUSH;
-
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import java.io.IOException;
+
/**
* Provides the current checkpoint Snapshot of the OM DB. (tar.gz)
*
@@ -68,21 +41,16 @@
* If Kerberos is not enabled, simply append the login user name to
* `ozone.administrator`, e.g. `scm`
*/
-public class OMDBCheckpointServlet extends HttpServlet {
+public class OMDBCheckpointServlet extends DBCheckpointServlet {
private static final Logger LOG =
LoggerFactory.getLogger(OMDBCheckpointServlet.class);
private static final long serialVersionUID = 1L;
- private transient OzoneManager om;
- private transient DBStore omDbStore;
- private transient OMMetrics omMetrics;
- private transient DataTransferThrottler throttler = null;
-
@Override
public void init() throws ServletException {
- om = (OzoneManager) getServletContext()
+ OzoneManager om = (OzoneManager) getServletContext()
.getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE);
if (om == null) {
@@ -90,185 +58,13 @@ public void init() throws ServletException {
return;
}
- omDbStore = om.getMetadataManager().getStore();
- omMetrics = om.getMetrics();
-
- OzoneConfiguration configuration = om.getConfiguration();
- long transferBandwidth = configuration.getLongBytes(
- OMConfigKeys.OZONE_DB_CHECKPOINT_TRANSFER_RATE_KEY,
- OMConfigKeys.OZONE_DB_CHECKPOINT_TRANSFER_RATE_DEFAULT);
-
- if (transferBandwidth > 0) {
- throttler = new DataTransferThrottler(transferBandwidth);
- }
- }
-
- private boolean hasPermission(String username) {
- // Check ACL for dbCheckpoint only when global Ozone ACL is enabled
- if (om.getAclsEnabled()) {
- // Only Ozone admins are allowed
- try {
- Collection admins = om.getOzoneAdmins(om.getConfiguration());
- if (admins.contains(OZONE_ADMINISTRATORS_WILDCARD) ||
- admins.contains(username)) {
- return true;
- }
- } catch (IOException e) {
- LOG.warn("Error checking permission: {}", e.getMessage());
- }
- return false;
- } else {
- return true;
- }
- }
-
- /**
- * Process a GET request for the Ozone Manager DB checkpoint snapshot.
- *
- * @param request The servlet request we are processing
- * @param response The servlet response we are creating
- */
- @Override
- public void doGet(HttpServletRequest request, HttpServletResponse response) {
-
- LOG.info("Received request to obtain OM DB checkpoint snapshot");
- if (omDbStore == null) {
- LOG.error(
- "Unable to process metadata snapshot request. DB Store is null");
- response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
- return;
- }
-
- // Check ACL for dbCheckpoint only when global Ozone ACL is enable
- if (om.getAclsEnabled()) {
- final java.security.Principal userPrincipal = request.getUserPrincipal();
- if (userPrincipal == null) {
- final String remoteUser = request.getRemoteUser();
- LOG.error("Permission denied: Unauthorized access to /dbCheckpoint,"
- + " no user principal found. Current login user is {}.",
- remoteUser != null ? "'" + remoteUser + "'" : "UNKNOWN");
- response.setStatus(HttpServletResponse.SC_FORBIDDEN);
- return;
- } else {
- final String userPrincipalName = userPrincipal.getName();
- if (!hasPermission(userPrincipalName)) {
- LOG.error("Permission denied: User principal '{}' does not have"
- + " access to /dbCheckpoint.\nThis can happen when Ozone Manager"
- + " is started with a different user.\nPlease append '{}' to OM"
- + " 'ozone.administrators' config and restart OM to grant current"
- + " user access to this endpoint.",
- userPrincipalName, userPrincipalName);
- response.setStatus(HttpServletResponse.SC_FORBIDDEN);
- return;
- }
- LOG.debug("Granted user principal '{}' access to /dbCheckpoint.",
- userPrincipalName);
- }
- }
-
- DBCheckpoint checkpoint = null;
try {
-
- boolean flush = false;
- String flushParam =
- request.getParameter(OZONE_DB_CHECKPOINT_REQUEST_FLUSH);
- if (StringUtils.isNotEmpty(flushParam)) {
- flush = Boolean.valueOf(flushParam);
- }
-
- checkpoint = omDbStore.getCheckpoint(flush);
- if (checkpoint == null || checkpoint.getCheckpointLocation() == null) {
- LOG.error("Unable to process metadata snapshot request. " +
- "Checkpoint request returned null.");
- response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
- return;
- }
- omMetrics.setLastCheckpointCreationTimeTaken(
- checkpoint.checkpointCreationTimeTaken());
-
- Path file = checkpoint.getCheckpointLocation().getFileName();
- if (file == null) {
- return;
- }
- response.setContentType("application/x-tgz");
- response.setHeader("Content-Disposition",
- "attachment; filename=\"" +
- file.toString() + ".tgz\"");
-
- Instant start = Instant.now();
- writeOmDBCheckpointToStream(checkpoint,
- response.getOutputStream());
- Instant end = Instant.now();
-
- long duration = Duration.between(start, end).toMillis();
- LOG.info("Time taken to write the checkpoint to response output " +
- "stream: {} milliseconds", duration);
- omMetrics.setLastCheckpointStreamingTimeTaken(duration);
- omMetrics.incNumCheckpoints();
- } catch (Exception e) {
- LOG.error(
- "Unable to process metadata snapshot request. ", e);
- response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
- omMetrics.incNumCheckpointFails();
- } finally {
- if (checkpoint != null) {
- try {
- checkpoint.cleanupCheckpoint();
- } catch (IOException e) {
- LOG.error("Error trying to clean checkpoint at {} .",
- checkpoint.getCheckpointLocation().toString());
- }
- }
- }
- }
-
- /**
- * Write OM DB Checkpoint to an output stream as a compressed file (tgz).
- *
- * @param checkpoint checkpoint file
- * @param destination desination output stream.
- * @throws IOException
- */
- public static void writeOmDBCheckpointToStream(DBCheckpoint checkpoint,
- OutputStream destination)
- throws IOException {
-
- try (CompressorOutputStream gzippedOut = new CompressorStreamFactory()
- .createCompressorOutputStream(CompressorStreamFactory.GZIP,
- destination)) {
-
- try (ArchiveOutputStream archiveOutputStream =
- new TarArchiveOutputStream(gzippedOut)) {
-
- Path checkpointPath = checkpoint.getCheckpointLocation();
- try (Stream files = Files.list(checkpointPath)) {
- for (Path path : files.collect(Collectors.toList())) {
- if (path != null) {
- Path fileName = path.getFileName();
- if (fileName != null) {
- includeFile(path.toFile(), fileName.toString(),
- archiveOutputStream);
- }
- }
- }
- }
- }
- } catch (CompressorException e) {
- throw new IOException(
- "Can't compress the checkpoint: " +
- checkpoint.getCheckpointLocation(), e);
- }
- }
-
- private static void includeFile(File file, String entryName,
- ArchiveOutputStream archiveOutputStream)
- throws IOException {
- ArchiveEntry archiveEntry =
- archiveOutputStream.createArchiveEntry(file, entryName);
- archiveOutputStream.putArchiveEntry(archiveEntry);
- try (FileInputStream fis = new FileInputStream(file)) {
- IOUtils.copy(fis, archiveOutputStream);
+ initialize(om.getMetadataManager().getStore(),
+ om.getMetrics().getDBCheckpointMetrics(),
+ om.getAclsEnabled(),
+ om.getOzoneAdmins(om.getConfiguration()));
+ } catch (IOException e) {
+ LOG.error("Error in getOzoneAdmins: {}", e.getMessage());
}
- archiveOutputStream.closeArchiveEntry();
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
index bc483434ccd2..7ce0a16ed081 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
@@ -20,12 +20,12 @@
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.utils.DBCheckpointMetrics;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
-import org.apache.hadoop.metrics2.lib.MutableGaugeLong;
/**
* This class is for maintaining Ozone Manager statistics.
@@ -131,14 +131,6 @@ public class OMMetrics {
// few minutes before restart may not be included in this count.
private @Metric MutableCounterLong numKeys;
-
-
- // Metrics to track checkpointing statistics from last run.
- private @Metric MutableGaugeLong lastCheckpointCreationTimeTaken;
- private @Metric MutableGaugeLong lastCheckpointStreamingTimeTaken;
- private @Metric MutableCounterLong numCheckpoints;
- private @Metric MutableCounterLong numCheckpointFails;
-
private @Metric MutableCounterLong numBucketS3Creates;
private @Metric MutableCounterLong numBucketS3CreateFails;
private @Metric MutableCounterLong numBucketS3Deletes;
@@ -163,7 +155,10 @@ public class OMMetrics {
private @Metric MutableCounterLong numTrashRootsEnqueued;
private @Metric MutableCounterLong numTrashRootsProcessed;
+ private final DBCheckpointMetrics dbCheckpointMetrics;
+
public OMMetrics() {
+ dbCheckpointMetrics = DBCheckpointMetrics.create("OM Metrics");
}
public static OMMetrics create() {
@@ -173,6 +168,10 @@ public static OMMetrics create() {
new OMMetrics());
}
+ public DBCheckpointMetrics getDBCheckpointMetrics() {
+ return dbCheckpointMetrics;
+ }
+
public void incNumS3BucketCreates() {
numBucketOps.incr();
numBucketS3Creates.incr();
@@ -553,22 +552,6 @@ public void incNumGetServiceListFails() {
numGetServiceListFails.incr();
}
- public void setLastCheckpointCreationTimeTaken(long val) {
- this.lastCheckpointCreationTimeTaken.set(val);
- }
-
- public void setLastCheckpointStreamingTimeTaken(long val) {
- this.lastCheckpointStreamingTimeTaken.set(val);
- }
-
- public void incNumCheckpoints() {
- numCheckpoints.incr();
- }
-
- public void incNumCheckpointFails() {
- numCheckpointFails.incr();
- }
-
public void incNumOpenKeyDeleteRequests() {
numOpenKeyDeleteRequests.incr();
}
@@ -842,21 +825,6 @@ public long getNumAbortMultipartUploadFails() {
return numAbortMultipartUploadFails.value();
}
- @VisibleForTesting
- public long getLastCheckpointCreationTimeTaken() {
- return lastCheckpointCreationTimeTaken.value();
- }
-
- @VisibleForTesting
- public long getNumCheckpoints() {
- return numCheckpoints.value();
- }
-
- @VisibleForTesting
- public long getLastCheckpointStreamingTimeTaken() {
- return lastCheckpointStreamingTimeTaken.value();
- }
-
public long getNumOpenKeyDeleteRequests() {
return numOpenKeyDeleteRequests.value();
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 0eac529c84d1..556e7e6a34ee 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -335,6 +335,8 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
// execution, we can get from ozoneManager.
private long maxUserVolumeCount;
+ private int minMultipartUploadPartSize = OzoneConsts.OM_MULTIPART_MIN_SIZE;
+
private final ScmClient scmClient;
private final long scmBlockSize;
private final int preallocateBlocksMax;
@@ -3881,4 +3883,13 @@ private void instantiatePrepareState(boolean withNewSnapshot)
prepareState.cancelPrepare();
}
}
+
+ public int getMinMultipartUploadPartSize() {
+ return minMultipartUploadPartSize;
+ }
+
+ @VisibleForTesting
+ public void setMinMultipartUploadPartSize(int partSizeForTest) {
+ this.minMultipartUploadPartSize = partSizeForTest;
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index f34f5bcf3933..5ef292593eb4 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -187,7 +187,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
// Update the block length for each block
List allocatedLocationInfoList =
omKeyInfo.getLatestVersionLocations().getLocationList();
- omKeyInfo.updateLocationInfoList(locationInfoList);
+ omKeyInfo.updateLocationInfoList(locationInfoList, false);
// Set the UpdateID to current transactionLogIndex
omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
index fa85f707c208..80a63e4a8544 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java
@@ -170,7 +170,11 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
newKeyArgs.setKeyName(keyPath);
- generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager);
+ if (keyArgs.getIsMultipartKey()) {
+ getFileEncryptionInfoForMpuKey(keyArgs, newKeyArgs, ozoneManager);
+ } else {
+ generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager);
+ }
newCreateKeyRequest =
createKeyRequest.toBuilder().setKeyArgs(newKeyArgs)
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index bb671df94c41..814e0651e90d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -546,6 +546,37 @@ protected void generateRequiredEncryptionInfo(KeyArgs keyArgs,
}
}
+ protected void getFileEncryptionInfoForMpuKey(KeyArgs keyArgs,
+ KeyArgs.Builder newKeyArgs, OzoneManager ozoneManager)
+ throws IOException {
+
+ String volumeName = keyArgs.getVolumeName();
+ String bucketName = keyArgs.getBucketName();
+
+ boolean acquireLock = false;
+ OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+ if (ozoneManager.getKmsProvider() != null) {
+ acquireLock = omMetadataManager.getLock().acquireReadLock(
+ BUCKET_LOCK, volumeName, bucketName);
+ try {
+ OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(
+ omMetadataManager.getMultipartKey(volumeName, bucketName,
+ keyArgs.getKeyName(), keyArgs.getMultipartUploadID()));
+
+ if (omKeyInfo != null && omKeyInfo.getFileEncryptionInfo() != null) {
+ newKeyArgs.setFileEncryptionInfo(
+ OMPBHelper.convert(omKeyInfo.getFileEncryptionInfo()));
+ }
+ } finally {
+ if (acquireLock) {
+ omMetadataManager.getLock().releaseReadLock(
+ BUCKET_LOCK, volumeName, bucketName);
+ }
+ }
+ }
+ }
+
/**
* Get FileEncryptionInfoProto from KeyArgs.
* @param keyArgs
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
index 474a6e1e0790..0cf3de475892 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java
@@ -23,7 +23,6 @@
import org.apache.hadoop.ozone.audit.OMAction;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
@@ -38,6 +37,7 @@
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.MultipartInfoInitiateResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.UniqueId;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
@@ -51,7 +51,6 @@
import java.util.Map;
import java.util.UUID;
-import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_SUPPORTED_OPERATION;
import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.InitiateMultiPartUpload;
@@ -81,6 +80,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException {
.setKeyName(validateAndNormalizeKey(
ozoneManager.getEnableFileSystemPaths(), keyArgs.getKeyName()));
+ generateRequiredEncryptionInfo(keyArgs, newKeyArgs, ozoneManager);
+
return getOmRequest().toBuilder()
.setUserInfo(getUserInfo())
.setInitiateMultiPartUploadRequest(
@@ -134,17 +135,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
- // If KMS is configured and TDE is enabled on bucket, throw MPU not
- // supported.
- if (ozoneManager.getKmsProvider() != null) {
- if (omMetadataManager.getBucketTable().get(
- omMetadataManager.getBucketKey(volumeName, bucketName))
- .getEncryptionKeyInfo() != null) {
- throw new OMException("MultipartUpload is not yet supported on " +
- "encrypted buckets", NOT_SUPPORTED_OPERATION);
- }
- }
-
// We are adding uploadId to key, because if multiple users try to
// perform multipart upload on the same key, each will try to upload, who
// ever finally commit the key, we see that key in ozone. Suppose if we
@@ -191,6 +181,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
.setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()))
.setObjectID(objectID)
.setUpdateID(transactionLogIndex)
+ .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ?
+ OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null)
.build();
// Add to cache
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
index 4389e732db09..64772b434838 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java
@@ -156,7 +156,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
omKeyInfo.setDataSize(keyArgs.getDataSize());
omKeyInfo.updateLocationInfoList(keyArgs.getKeyLocationsList().stream()
.map(OmKeyLocationInfo::getFromProtobuf)
- .collect(Collectors.toList()));
+ .collect(Collectors.toList()), true);
// Set Modification time
omKeyInfo.setModificationTime(keyArgs.getModificationTime());
// Set the UpdateID to current transactionLogIndex
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
index fd4af376d046..60938c2cbb63 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
@@ -54,7 +54,6 @@
import com.google.common.base.Optional;
import org.apache.commons.codec.digest.DigestUtils;
-import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type.CompleteMultiPartUpload;
@@ -219,12 +218,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
// Except for last part all parts should have minimum size.
if (currentPartCount != partsListSize) {
- if (currentPartKeyInfo.getDataSize() < OM_MULTIPART_MIN_SIZE) {
- LOG.error("MultipartUpload: {} Part number: {} size {} is less "
- + "than minimum part size {}", ozoneKey,
- partKeyInfo.getPartNumber(),
- currentPartKeyInfo.getDataSize(),
- OzoneConsts.OM_MULTIPART_MIN_SIZE);
+ if (currentPartKeyInfo.getDataSize() <
+ ozoneManager.getMinMultipartUploadPartSize()) {
+ LOG.error("MultipartUpload: {} Part number: {} size {} is less" +
+ " than minimum part size {}", ozoneKey,
+ partKeyInfo.getPartNumber(), currentPartKeyInfo.getDataSize(),
+ ozoneManager.getMinMultipartUploadPartSize());
throw new OMException(
failureMessage(requestedVolume, requestedBucket, keyName) +
". Entity too small.",
@@ -235,6 +234,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
// As all part keys will have only one version.
OmKeyLocationInfoGroup currentKeyInfoGroup = currentPartKeyInfo
.getKeyLocationVersions().get(0);
+
+ // Set partNumber in each block.
+ currentKeyInfoGroup.getLocationList().forEach(
+ omKeyLocationInfo -> omKeyLocationInfo.setPartNumber(partNumber));
+
partLocationInfos.addAll(currentKeyInfoGroup.getLocationList());
dataSize += currentPartKeyInfo.getDataSize();
}
@@ -250,7 +254,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
if (omKeyInfo == null) {
// This is a newly added key, it does not have any versions.
OmKeyLocationInfoGroup keyLocationInfoGroup = new
- OmKeyLocationInfoGroup(0, partLocationInfos);
+ OmKeyLocationInfoGroup(0, partLocationInfos, true);
// Get the objectID of the key from OpenKeyTable
OmKeyInfo dbOpenKeyInfo = omMetadataManager.getOpenKeyTable()
@@ -264,6 +268,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
.setCreationTime(keyArgs.getModificationTime())
.setModificationTime(keyArgs.getModificationTime())
.setDataSize(dataSize)
+ .setFileEncryptionInfo(dbOpenKeyInfo.getFileEncryptionInfo())
.setOmKeyLocationInfos(
Collections.singletonList(keyLocationInfoGroup))
.setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
@@ -279,7 +284,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
// But now as versioning is not supported, just following the commit
// key approach. When versioning support comes, then we can uncomment
// below code keyInfo.addNewVersion(locations);
- omKeyInfo.updateLocationInfoList(partLocationInfos);
+ omKeyInfo.updateLocationInfoList(partLocationInfos, true);
omKeyInfo.setModificationTime(keyArgs.getModificationTime());
omKeyInfo.setDataSize(dataSize);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
index 5d562471a8a7..5cbab70142a0 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
@@ -191,8 +191,8 @@ public OMResponse handleReadRequest(OMRequest request) {
responseBuilder.setDbUpdatesResponse(dbUpdatesResponse);
break;
case GetFileStatus:
- GetFileStatusResponse getFileStatusResponse =
- getOzoneFileStatus(request.getGetFileStatusRequest());
+ GetFileStatusResponse getFileStatusResponse = getOzoneFileStatus(
+ request.getGetFileStatusRequest(), request.getVersion());
responseBuilder.setGetFileStatusResponse(getFileStatusResponse);
break;
case LookupFile:
@@ -202,7 +202,7 @@ public OMResponse handleReadRequest(OMRequest request) {
break;
case ListStatus:
ListStatusResponse listStatusResponse =
- listStatus(request.getListStatusRequest());
+ listStatus(request.getListStatusRequest(), request.getVersion());
responseBuilder.setListStatusResponse(listStatusResponse);
break;
case GetAcl:
@@ -555,7 +555,7 @@ private ListMultipartUploadsResponse listMultipartUploads(
}
private GetFileStatusResponse getOzoneFileStatus(
- GetFileStatusRequest request) throws IOException {
+ GetFileStatusRequest request, int clientVersion) throws IOException {
KeyArgs keyArgs = request.getKeyArgs();
OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
.setVolumeName(keyArgs.getVolumeName())
@@ -565,7 +565,7 @@ private GetFileStatusResponse getOzoneFileStatus(
.build();
GetFileStatusResponse.Builder rb = GetFileStatusResponse.newBuilder();
- rb.setStatus(impl.getFileStatus(omKeyArgs).getProtobuf());
+ rb.setStatus(impl.getFileStatus(omKeyArgs).getProtobuf(clientVersion));
return rb.build();
}
@@ -586,7 +586,7 @@ private LookupFileResponse lookupFile(LookupFileRequest request,
}
private ListStatusResponse listStatus(
- ListStatusRequest request) throws IOException {
+ ListStatusRequest request, int clientVersion) throws IOException {
KeyArgs keyArgs = request.getKeyArgs();
OmKeyArgs omKeyArgs = new OmKeyArgs.Builder()
.setVolumeName(keyArgs.getVolumeName())
@@ -601,7 +601,7 @@ private ListStatusResponse listStatus(
listStatusResponseBuilder =
ListStatusResponse.newBuilder();
for (OzoneFileStatus status : statuses) {
- listStatusResponseBuilder.addStatuses(status.getProtobuf());
+ listStatusResponseBuilder.addStatuses(status.getProtobuf(clientVersion));
}
return listStatusResponseBuilder.build();
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
index 33fd1cd215b2..088b232b82e9 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequest.java
@@ -21,14 +21,6 @@
import java.util.UUID;
-import com.google.common.base.Optional;
-import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
-import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
-import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
-import org.apache.hadoop.ozone.om.helpers.BucketEncryptionKeyInfo;
-import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
-import org.apache.hadoop.ozone.om.request.OMClientRequest;
-import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
@@ -36,12 +28,6 @@
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
-import org.mockito.Mockito;
-
-import static org.apache.hadoop.crypto.CipherSuite.AES_CTR_NOPADDING;
-import static org.apache.hadoop.crypto.CryptoProtocolVersion.ENCRYPTION_ZONES;
-import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_SUPPORTED_OPERATION;
-import static org.mockito.Mockito.when;
/**
* Tests S3 Initiate Multipart Upload request.
@@ -132,7 +118,6 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
Assert.assertNull(omMetadataManager.getMultipartInfoTable()
.get(multipartKey));
-
}
@Test
@@ -141,7 +126,6 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
String bucketName = UUID.randomUUID().toString();
String keyName = UUID.randomUUID().toString();
-
OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, bucketName,
keyName);
@@ -162,49 +146,5 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
Assert.assertNull(omMetadataManager.getOpenKeyTable().get(multipartKey));
Assert.assertNull(omMetadataManager.getMultipartInfoTable()
.get(multipartKey));
-
- }
-
- @Test
- public void testMPUNotSupported() throws Exception {
- String volumeName = UUID.randomUUID().toString();
- String bucketName = UUID.randomUUID().toString();
- String keyName = UUID.randomUUID().toString();
-
- when(ozoneManager.getKmsProvider())
- .thenReturn(Mockito.mock(KeyProviderCryptoExtension.class));
-
- TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
-
- // Set encryption info and create bucket
- OmBucketInfo omBucketInfo =
- OmBucketInfo.newBuilder().setVolumeName(volumeName)
- .setBucketName(bucketName).setCreationTime(Time.now())
- .setBucketEncryptionKey(new BucketEncryptionKeyInfo.Builder()
- .setKeyName("dummy").setSuite(AES_CTR_NOPADDING)
- .setVersion(ENCRYPTION_ZONES).build())
- .build();
-
- String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
-
- omMetadataManager.getBucketTable().put(bucketKey, omBucketInfo);
-
- omMetadataManager.getBucketTable().addCacheEntry(new CacheKey<>(bucketKey),
- new CacheValue<>(Optional.of(omBucketInfo), 100L));
-
- OMRequest modifiedRequest = doPreExecuteInitiateMPU(volumeName, bucketName,
- keyName);
-
- OMClientRequest omClientRequest =
- new S3InitiateMultipartUploadRequest(modifiedRequest);
-
- OMClientResponse omClientResponse =
- omClientRequest.validateAndUpdateCache(ozoneManager, 1L,
- ozoneManagerDoubleBufferHelper);
-
- Assert.assertNotNull(omClientResponse.getOMResponse());
- Assert.assertEquals(NOT_SUPPORTED_OPERATION,
- omClientResponse.getOMResponse().getStatus());
-
}
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
index ce5b41630174..6bf2a9715910 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.ozone.recon.api;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.scm.node.DatanodeInfo;
@@ -86,6 +87,7 @@ public Response getDatanodes() {
} catch (NodeNotFoundException e) {
LOG.warn("Cannot get nodeState for datanode {}", datanode, e);
}
+ final NodeOperationalState nodeOpState = datanode.getPersistedOpState();
String hostname = datanode.getHostName();
Set pipelineIDs = nodeManager.getPipelines(datanode);
List pipelines = new ArrayList<>();
@@ -126,6 +128,7 @@ public Response getDatanodes() {
.withDatanodeStorageReport(storageReport)
.withLastHeartbeat(nodeManager.getLastHeartbeat(datanode))
.withState(nodeState)
+ .withOperationalState(nodeOpState)
.withPipelines(pipelines)
.withLeaderCount(leaderCount.get())
.withUUid(datanode.getUuidString())
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java
index 426a388eeaaf..0aa2952ce174 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.recon.api.types;
import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import javax.xml.bind.annotation.XmlAccessType;
@@ -40,6 +41,9 @@ public final class DatanodeMetadata {
@XmlElement(name = "state")
private NodeState state;
+ @XmlElement(name = "opState")
+ private NodeOperationalState opState;
+
@XmlElement(name = "lastHeartbeat")
private long lastHeartbeat;
@@ -74,6 +78,7 @@ private DatanodeMetadata(Builder builder) {
this.hostname = builder.hostname;
this.uuid = builder.uuid;
this.state = builder.state;
+ this.opState = builder.opState;
this.lastHeartbeat = builder.lastHeartbeat;
this.datanodeStorageReport = builder.datanodeStorageReport;
this.pipelines = builder.pipelines;
@@ -94,6 +99,10 @@ public NodeState getState() {
return state;
}
+ public NodeOperationalState getOperationalState() {
+ return opState;
+ }
+
public long getLastHeartbeat() {
return lastHeartbeat;
}
@@ -154,6 +163,7 @@ public static final class Builder {
private String hostname;
private String uuid;
private NodeState state;
+ private NodeOperationalState opState;
private long lastHeartbeat;
private DatanodeStorageReport datanodeStorageReport;
     private List<DatanodePipeline> pipelines;
@@ -180,6 +190,11 @@ public Builder withState(NodeState state) {
return this;
}
+ public Builder withOperationalState(NodeOperationalState opState) {
+ this.opState = opState;
+ return this;
+ }
+
public Builder withLastHeartbeat(long lastHeartbeat) {
this.lastHeartbeat = lastHeartbeat;
return this;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
index 5af96b36c09f..9a7c75cf64b2 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
@@ -129,8 +130,8 @@ public long getLastHeartbeat(DatanodeDetails datanodeDetails) {
@Override
public void onMessage(CommandForDatanode commandForDatanode,
EventPublisher ignored) {
- if (ALLOWED_COMMANDS.contains(
- commandForDatanode.getCommand().getType())) {
+ final Type cmdType = commandForDatanode.getCommand().getType();
+ if (ALLOWED_COMMANDS.contains(cmdType)) {
super.onMessage(commandForDatanode, ignored);
} else {
LOG.debug("Ignoring unsupported command {} for Datanode {}.",
@@ -156,4 +157,14 @@ public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails,
.filter(c -> ALLOWED_COMMANDS.contains(c.getType()))
.collect(toList());
}
+
+ @Override
+ protected void updateDatanodeOpState(DatanodeDetails reportedDn)
+ throws NodeNotFoundException {
+ super.updateDatanodeOpState(reportedDn);
+ // Update NodeOperationalState in NodeStatus to keep it consistent for Recon
+ super.getNodeStateManager().setNodeOperationalState(reportedDn,
+ reportedDn.getPersistedOpState(),
+ reportedDn.getPersistedOpStateExpiryEpochSec());
+ }
}
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json
index 8d61333b087b..eb17c9cc8754 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json
@@ -20,6 +20,7 @@
"hostname": "localhost1.storage.enterprise.com",
"uuid": "b590734e-a5f2-11ea-bb37-0242ac130002",
"state": "HEALTHY",
+ "opState": "IN_SERVICE",
"lastHeartbeat": 1574728876059,
"storageReport": {
"capacity": 62725623808,
@@ -51,6 +52,7 @@
"hostname": "localhost2.storage.enterprise.com",
"uuid": "b5907812-a5f2-11ea-bb37-0242ac130002",
"state": "HEALTHY",
+ "opState": "DECOMMISSIONING",
"lastHeartbeat": 1574724876059,
"storageReport": {
"capacity": 549755813888,
@@ -80,8 +82,105 @@
},
{
"hostname": "localhost3.storage.enterprise.com",
+ "uuid": "b5907812-a5f2-11ea-bb37-0242ac130002",
+ "state": "HEALTHY",
+ "opState": "DECOMMISSIONED",
+ "lastHeartbeat": 1574724876059,
+ "storageReport": {
+ "capacity": 549755813888,
+ "used": 450971566080,
+ "remaining": 95784247808
+ },
+ "pipelines": [
+ {
+ "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc71a",
+ "replicationType": "RATIS",
+ "replicationFactor": 3,
+ "leaderNode": "localhost1.storage.enterprise.com"
+ },
+ {
+ "pipelineID": "05e3d908-ff01-4ce6-ad75-f3ec79bcc7982",
+ "replicationType": "RATIS",
+ "replicationFactor": 1,
+ "leaderNode": "localhost2.storage.enterprise.com"
+ }
+ ],
+ "containers": 8192,
+ "leaderCount": 1,
+ "version": "0.6.0-SNAPSHOT",
+ "setupTime": 1574724805059,
+ "revision": "caf471111cdb9168ec013f4526bb997aa513e079",
+ "buildDate": "2020-07-20T15:45Z"
+ },
+ {
+ "hostname": "localhost4.storage.enterprise.com",
+ "uuid": "b5907812-a5f2-11ea-bb37-0242ac130002",
+ "state": "HEALTHY",
+ "opState": "ENTERING_MAINTENANCE",
+ "lastHeartbeat": 1574724876059,
+ "storageReport": {
+ "capacity": 549755813888,
+ "used": 450971566080,
+ "remaining": 95784247808
+ },
+ "pipelines": [
+ {
+ "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc71a",
+ "replicationType": "RATIS",
+ "replicationFactor": 3,
+ "leaderNode": "localhost1.storage.enterprise.com"
+ },
+ {
+ "pipelineID": "05e3d908-ff01-4ce6-ad75-f3ec79bcc7982",
+ "replicationType": "RATIS",
+ "replicationFactor": 1,
+ "leaderNode": "localhost2.storage.enterprise.com"
+ }
+ ],
+ "containers": 8192,
+ "leaderCount": 1,
+ "version": "0.6.0-SNAPSHOT",
+ "setupTime": 1574724805059,
+ "revision": "caf471111cdb9168ec013f4526bb997aa513e079",
+ "buildDate": "2020-07-20T15:45Z"
+ },
+ {
+ "hostname": "localhost5.storage.enterprise.com",
+ "uuid": "b5907812-a5f2-11ea-bb37-0242ac130002",
+ "state": "HEALTHY",
+ "opState": "IN_MAINTENANCE",
+ "lastHeartbeat": 1574724876059,
+ "storageReport": {
+ "capacity": 549755813888,
+ "used": 450971566080,
+ "remaining": 95784247808
+ },
+ "pipelines": [
+ {
+ "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc71a",
+ "replicationType": "RATIS",
+ "replicationFactor": 3,
+ "leaderNode": "localhost1.storage.enterprise.com"
+ },
+ {
+ "pipelineID": "05e3d908-ff01-4ce6-ad75-f3ec79bcc7982",
+ "replicationType": "RATIS",
+ "replicationFactor": 1,
+ "leaderNode": "localhost2.storage.enterprise.com"
+ }
+ ],
+ "containers": 8192,
+ "leaderCount": 1,
+ "version": "0.6.0-SNAPSHOT",
+ "setupTime": 1574724805059,
+ "revision": "caf471111cdb9168ec013f4526bb997aa513e079",
+ "buildDate": "2020-07-20T15:45Z"
+ },
+ {
+ "hostname": "localhost6.storage.enterprise.com",
"uuid": "b5907934-a5f2-11ea-bb37-0242ac130002",
"state": "STALE",
+ "opState": "IN_SERVICE",
"lastHeartbeat": 1343544879843,
"storageReport": {
"capacity": 140737488355328,
@@ -116,9 +215,162 @@
"buildDate": "2020-07-19T13:45Z"
},
{
- "hostname": "localhost4.storage.enterprise.com",
+ "hostname": "localhost7.storage.enterprise.com",
+ "uuid": "b5907934-a5f2-11ea-bb37-0242ac130002",
+ "state": "STALE",
+ "opState": "DECOMMISSIONING",
+ "lastHeartbeat": 1343544879843,
+ "storageReport": {
+ "capacity": 140737488355328,
+ "used": 43980465111040,
+ "remaining": 86757023244288
+ },
+ "pipelines": [
+ {
+ "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc71a",
+ "replicationType": "RATIS",
+ "replicationFactor": 3,
+ "leaderNode": "localhost1.storage.enterprise.com"
+ },
+ {
+ "pipelineID": "05e3d908-ff01-4ce6-ad75-f3ec79bcc7982",
+ "replicationType": "RATIS",
+ "replicationFactor": 1,
+ "leaderNode": "localhost3.storage.enterprise.com"
+ },
+ {
+ "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc71a",
+ "replicationType": "STAND_ALONE",
+ "replicationFactor": 1,
+ "leaderNode": "localhost3.storage.enterprise.com"
+ }
+ ],
+ "containers": 43,
+ "leaderCount": 2,
+ "version": "0.6.0-SNAPSHOT",
+ "setupTime": 1343544679543,
+ "revision": "aaf470000cdb9168ec013f4526bb997aa513e079",
+ "buildDate": "2020-07-19T13:45Z"
+ },
+ {
+ "hostname": "localhost8.storage.enterprise.com",
+ "uuid": "b5907934-a5f2-11ea-bb37-0242ac130002",
+ "state": "STALE",
+ "opState": "DECOMMISSIONED",
+ "lastHeartbeat": 1343544879843,
+ "storageReport": {
+ "capacity": 140737488355328,
+ "used": 43980465111040,
+ "remaining": 86757023244288
+ },
+ "pipelines": [
+ {
+ "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc71a",
+ "replicationType": "RATIS",
+ "replicationFactor": 3,
+ "leaderNode": "localhost1.storage.enterprise.com"
+ },
+ {
+ "pipelineID": "05e3d908-ff01-4ce6-ad75-f3ec79bcc7982",
+ "replicationType": "RATIS",
+ "replicationFactor": 1,
+ "leaderNode": "localhost3.storage.enterprise.com"
+ },
+ {
+ "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc71a",
+ "replicationType": "STAND_ALONE",
+ "replicationFactor": 1,
+ "leaderNode": "localhost3.storage.enterprise.com"
+ }
+ ],
+ "containers": 43,
+ "leaderCount": 2,
+ "version": "0.6.0-SNAPSHOT",
+ "setupTime": 1343544679543,
+ "revision": "aaf470000cdb9168ec013f4526bb997aa513e079",
+ "buildDate": "2020-07-19T13:45Z"
+ },
+ {
+ "hostname": "localhost9.storage.enterprise.com",
+ "uuid": "b5907934-a5f2-11ea-bb37-0242ac130002",
+ "state": "STALE",
+ "opState": "ENTERING_MAINTENANCE",
+ "lastHeartbeat": 1343544879843,
+ "storageReport": {
+ "capacity": 140737488355328,
+ "used": 43980465111040,
+ "remaining": 86757023244288
+ },
+ "pipelines": [
+ {
+ "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc71a",
+ "replicationType": "RATIS",
+ "replicationFactor": 3,
+ "leaderNode": "localhost1.storage.enterprise.com"
+ },
+ {
+ "pipelineID": "05e3d908-ff01-4ce6-ad75-f3ec79bcc7982",
+ "replicationType": "RATIS",
+ "replicationFactor": 1,
+ "leaderNode": "localhost3.storage.enterprise.com"
+ },
+ {
+ "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc71a",
+ "replicationType": "STAND_ALONE",
+ "replicationFactor": 1,
+ "leaderNode": "localhost3.storage.enterprise.com"
+ }
+ ],
+ "containers": 43,
+ "leaderCount": 2,
+ "version": "0.6.0-SNAPSHOT",
+ "setupTime": 1343544679543,
+ "revision": "aaf470000cdb9168ec013f4526bb997aa513e079",
+ "buildDate": "2020-07-19T13:45Z"
+ },
+ {
+ "hostname": "localhost10.storage.enterprise.com",
+ "uuid": "b5907934-a5f2-11ea-bb37-0242ac130002",
+ "state": "STALE",
+ "opState": "IN_MAINTENANCE",
+ "lastHeartbeat": 1343544879843,
+ "storageReport": {
+ "capacity": 140737488355328,
+ "used": 43980465111040,
+ "remaining": 86757023244288
+ },
+ "pipelines": [
+ {
+ "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc71a",
+ "replicationType": "RATIS",
+ "replicationFactor": 3,
+ "leaderNode": "localhost1.storage.enterprise.com"
+ },
+ {
+ "pipelineID": "05e3d908-ff01-4ce6-ad75-f3ec79bcc7982",
+ "replicationType": "RATIS",
+ "replicationFactor": 1,
+ "leaderNode": "localhost3.storage.enterprise.com"
+ },
+ {
+ "pipelineID": "02e3d908-ff01-4ce6-ad75-f3ec79bcc71a",
+ "replicationType": "STAND_ALONE",
+ "replicationFactor": 1,
+ "leaderNode": "localhost3.storage.enterprise.com"
+ }
+ ],
+ "containers": 43,
+ "leaderCount": 2,
+ "version": "0.6.0-SNAPSHOT",
+ "setupTime": 1343544679543,
+ "revision": "aaf470000cdb9168ec013f4526bb997aa513e079",
+ "buildDate": "2020-07-19T13:45Z"
+ },
+ {
+ "hostname": "localhost11.storage.enterprise.com",
"uuid": "b5907a06-a5f2-11ea-bb37-0242ac130002",
"state": "DEAD",
+ "opState": "IN_SERVICE",
"lastHeartbeat": 1074724876059,
"storageReport": {
"capacity": 140737488355328,
@@ -134,9 +386,10 @@
"buildDate": "2020-07-19T13:45Z"
},
{
- "hostname": "localhost5.storage.enterprise.com",
+ "hostname": "localhost12.storage.enterprise.com",
"uuid": "b5907ac4-a5f2-11ea-bb37-0242ac130002",
- "state": "DECOMMISSIONING",
+ "state": "DEAD",
+ "opState": "DECOMMISSIONING",
"lastHeartbeat": 1574724876059,
"storageReport": {
"capacity": 805306368000,
@@ -165,9 +418,10 @@
"buildDate": "2020-07-19T13:45Z"
},
{
- "hostname": "localhost6.storage.enterprise.com",
+ "hostname": "localhost13.storage.enterprise.com",
"uuid": "b5907b82-a5f2-11ea-bb37-0242ac130002",
- "state": "HEALTHY",
+ "state": "DEAD",
+ "opState": "DECOMMISSIONED",
"lastHeartbeat": 1574724876059,
"storageReport": {
"capacity": 140737488355328,
@@ -196,9 +450,10 @@
"buildDate": "2020-07-19T13:45Z"
},
{
- "hostname": "localhost7.storage.enterprise.com",
+ "hostname": "localhost14.storage.enterprise.com",
"uuid": "b5907c40-a5f2-11ea-bb37-0242ac130002",
- "state": "HEALTHY",
+ "state": "DEAD",
+ "opState": "ENTERING_MAINTENANCE",
"lastHeartbeat": 1574724876059,
"storageReport": {
"capacity": 549755813888,
@@ -233,9 +488,10 @@
"buildDate": "2020-07-19T13:45Z"
},
{
- "hostname": "localhost8.storage.enterprise.com",
+ "hostname": "localhost15.storage.enterprise.com",
"uuid": "b5907cf4-a5f2-11ea-bb37-0242ac130002",
- "state": "DECOMMISSIONED",
+ "state": "DEAD",
+ "opState": "IN_MAINTENANCE",
"lastHeartbeat": 1574724876059,
"storageReport": {
"capacity": 140737488355328,
@@ -264,9 +520,10 @@
"buildDate": "2020-07-20T15:45Z"
},
{
- "hostname": "localhost9.storage.enterprise.com",
+ "hostname": "localhost16.storage.enterprise.com",
"uuid": "b5907f4c-a5f2-11ea-bb37-0242ac130002",
"state": "HEALTHY",
+ "opState": "IN_SERVICE",
"lastHeartbeat": 1574724874011,
"storageReport": {
"capacity": 140737488355328,
@@ -295,9 +552,10 @@
"buildDate": "2020-07-20T15:45Z"
},
{
- "hostname": "localhost10.storage.enterprise.com",
+ "hostname": "localhost17.storage.enterprise.com",
"uuid": "b590801e-a5f2-11ea-bb37-0242ac130002",
"state": "HEALTHY",
+ "opState": "IN_SERVICE",
"lastHeartbeat": 1574723876959,
"storageReport": {
"capacity": 140737488355328,
@@ -332,9 +590,10 @@
"buildDate": "2020-07-20T15:45Z"
},
{
- "hostname": "localhost11.storage.enterprise.com",
+ "hostname": "localhost18.storage.enterprise.com",
"uuid": "b59080e6-a5f2-11ea-bb37-0242ac130002",
"state": "STALE",
+ "opState": "IN_SERVICE",
"lastHeartbeat": 1474724876783,
"storageReport": {
"capacity": 140737488355328,
@@ -363,9 +622,10 @@
"buildDate": "2020-07-20T10:45Z"
},
{
- "hostname": "localhost12.storage.enterprise.com",
+ "hostname": "localhost19.storage.enterprise.com",
"uuid": "b59081a4-a5f2-11ea-bb37-0242ac130002",
"state": "HEALTHY",
+ "opState": "IN_SERVICE",
"lastHeartbeat": 1574724796532,
"storageReport": {
"capacity": 140737488355328,
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx
index e9cb16820814..8f92742916f3 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/types/datanode.types.tsx
@@ -16,9 +16,15 @@
* limitations under the License.
*/
-export const DatanodeStatusList = ['HEALTHY', 'STALE', 'DEAD', 'DECOMMISSIONING', 'DECOMMISSIONED'] as const;
-type DatanodeStatusTuple = typeof DatanodeStatusList;
-export type DatanodeStatus = DatanodeStatusTuple[number]; // 'HEALTHY' | 'STALE' | 'DEAD' | 'DECOMMISSIONING' | 'DECOMMISSIONED';
+// Corresponds to HddsProtos.NodeState
+export const DatanodeStateList = ['HEALTHY', 'STALE', 'DEAD'] as const;
+type DatanodeStateType = typeof DatanodeStateList;
+export type DatanodeState = DatanodeStateType[number];
+
+// Corresponds to HddsProtos.NodeOperationalState
+export const DatanodeOpStateList = ['IN_SERVICE', 'DECOMMISSIONING', 'DECOMMISSIONED', 'ENTERING_MAINTENANCE', 'IN_MAINTENANCE'] as const;
+type DatanodeOpStateType = typeof DatanodeOpStateList;
+export type DatanodeOpState = DatanodeOpStateType[number];
export interface IStorageReport {
capacity: number;
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
index 91b6a45aa99d..a9631c66e62e 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
@@ -23,7 +23,13 @@ import {PaginationConfig} from 'antd/lib/pagination';
import moment from 'moment';
import {ReplicationIcon} from 'utils/themeIcons';
import StorageBar from 'components/storageBar/storageBar';
-import {DatanodeStatus, DatanodeStatusList, IStorageReport} from 'types/datanode.types';
+import {
+ DatanodeState,
+ DatanodeStateList,
+ DatanodeOpState,
+ DatanodeOpStateList,
+ IStorageReport
+} from 'types/datanode.types';
import './datanodes.less';
import {AutoReloadHelper} from 'utils/autoReloadHelper';
import AutoReloadPanel from 'components/autoReloadPanel/autoReloadPanel';
@@ -34,7 +40,8 @@ import {ColumnSearch} from 'utils/columnSearch';
interface IDatanodeResponse {
hostname: string;
- state: DatanodeStatus;
+ state: DatanodeState;
+ opState: DatanodeOpState;
lastHeartbeat: number;
storageReport: IStorageReport;
pipelines: IPipeline[];
@@ -54,7 +61,8 @@ interface IDatanodesResponse {
interface IDatanode {
hostname: string;
- state: DatanodeStatus;
+ state: DatanodeState;
+ opState: DatanodeOpState;
lastHeartbeat: number;
storageUsed: number;
storageTotal: number;
@@ -85,28 +93,50 @@ interface IDatanodesState {
columnOptions: IOption[];
}
-const renderDatanodeStatus = (status: DatanodeStatus) => {
- const statusIconMap = {
+const renderDatanodeState = (state: DatanodeState) => {
+ const stateIconMap = {
HEALTHY: ,
STALE: ,
- DEAD: ,
- DECOMMISSIONING: ,
- DECOMMISSIONED:
+ DEAD:
};
- const icon = status in statusIconMap ? statusIconMap[status] : '';
- return {icon} {status};
+ const icon = state in stateIconMap ? stateIconMap[state] : '';
+ return {icon} {state};
+};
+
+const renderDatanodeOpState = (opState: DatanodeOpState) => {
+ const opStateIconMap = {
+ IN_SERVICE: ,
+ DECOMMISSIONING: ,
+ DECOMMISSIONED: ,
+ ENTERING_MAINTENANCE: ,
+ IN_MAINTENANCE:
+ };
+ const icon = opState in opStateIconMap ? opStateIconMap[opState] : '';
+ return {icon} {opState};
};
const COLUMNS = [
{
- title: 'Status',
+ title: 'State',
dataIndex: 'state',
key: 'state',
isVisible: true,
filterMultiple: true,
- filters: DatanodeStatusList.map(status => ({text: status, value: status})),
- onFilter: (value: DatanodeStatus, record: IDatanode) => record.state === value,
- render: (text: DatanodeStatus) => renderDatanodeStatus(text),
+ filters: DatanodeStateList.map(state => ({text: state, value: state})),
+ onFilter: (value: DatanodeState, record: IDatanode) => record.state === value,
+ render: (text: DatanodeState) => renderDatanodeState(text),
+ sorter: (a: IDatanode, b: IDatanode) => a.state.localeCompare(b.state),
+ fixed: 'left'
+ },
+ {
+ title: 'Operational State',
+ dataIndex: 'opState',
+ key: 'opState',
+ isVisible: true,
+ filterMultiple: true,
+ filters: DatanodeOpStateList.map(state => ({text: state, value: state})),
+ onFilter: (value: DatanodeOpState, record: IDatanode) => record.opState === value,
+ render: (text: DatanodeOpState) => renderDatanodeOpState(text),
 sorter: (a: IDatanode, b: IDatanode) => a.opState.localeCompare(b.opState),
fixed: 'left'
},
@@ -290,6 +320,7 @@ export class Datanodes extends React.Component, IDatanode
hostname: datanode.hostname,
uuid: datanode.uuid,
state: datanode.state,
+ opState: datanode.opState,
lastHeartbeat: datanode.lastHeartbeat,
storageUsed: datanode.storageReport.used,
storageTotal: datanode.storageReport.capacity,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index a2a87a805e27..70fe2821d670 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -24,6 +24,8 @@
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos
.ExtendedDatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.PipelineID;
@@ -41,6 +43,8 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
@@ -111,6 +115,7 @@
import java.util.List;
import java.util.UUID;
import java.util.concurrent.Callable;
+import java.util.concurrent.atomic.AtomicInteger;
/**
* Test for Recon API endpoints.
@@ -382,6 +387,11 @@ public void setUp() throws Exception {
private void testDatanodeResponse(DatanodeMetadata datanodeMetadata)
throws IOException {
+ // Check NodeState and NodeOperationalState field existence
+ Assert.assertEquals(NodeState.HEALTHY, datanodeMetadata.getState());
+ Assert.assertEquals(NodeOperationalState.IN_SERVICE,
+ datanodeMetadata.getOperationalState());
+
String hostname = datanodeMetadata.getHostname();
switch (hostname) {
case HOST1:
@@ -451,6 +461,43 @@ public void testGetDatanodes() throws Exception {
reconScm.getPipelineManager()
.getContainersInPipeline(pipeline.getId()).size() == 1);
});
+
+ // Change Node OperationalState with NodeManager
+ final NodeManager nodeManager = reconScm.getScmNodeManager();
+ final DatanodeDetails dnDetailsInternal =
+ nodeManager.getNodeByUuid(datanodeDetails.getUuidString());
+ // Backup existing state and sanity check
+ final NodeStatus nStatus = nodeManager.getNodeStatus(dnDetailsInternal);
+ final NodeOperationalState backupOpState =
+ dnDetailsInternal.getPersistedOpState();
+ final long backupOpStateExpiry =
+ dnDetailsInternal.getPersistedOpStateExpiryEpochSec();
+ assertEquals(backupOpState, nStatus.getOperationalState());
+ assertEquals(backupOpStateExpiry, nStatus.getOpStateExpiryEpochSeconds());
+
+ dnDetailsInternal.setPersistedOpState(NodeOperationalState.DECOMMISSIONING);
+ dnDetailsInternal.setPersistedOpStateExpiryEpochSec(666L);
+ nodeManager.setNodeOperationalState(dnDetailsInternal,
+ NodeOperationalState.DECOMMISSIONING, 666L);
+ // Check if the endpoint response reflects the change
+ response = nodeEndpoint.getDatanodes();
+ datanodesResponse = (DatanodesResponse) response.getEntity();
+ // Order of datanodes in the response is random
+ AtomicInteger count = new AtomicInteger();
+ datanodesResponse.getDatanodes().forEach(metadata -> {
+ if (metadata.getUuid().equals(dnDetailsInternal.getUuidString())) {
+ count.incrementAndGet();
+ assertEquals(NodeOperationalState.DECOMMISSIONING,
+ metadata.getOperationalState());
+ }
+ });
+ assertEquals(1, count.get());
+
+ // Restore state
+ dnDetailsInternal.setPersistedOpState(backupOpState);
+ dnDetailsInternal.setPersistedOpStateExpiryEpochSec(backupOpStateExpiry);
+ nodeManager.setNodeOperationalState(dnDetailsInternal,
+ backupOpState, backupOpStateExpiry);
}
@Test
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
index c49104b4850d..919974545c0c 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.hdds.utils.db.DBStore;
@@ -81,7 +82,7 @@ public void tearDown() throws Exception {
}
@Test
- public void testReconNodeDB() throws IOException {
+ public void testReconNodeDB() throws IOException, NodeNotFoundException {
ReconStorageConfig scmStorageConfig = new ReconStorageConfig(conf);
EventQueue eventQueue = new EventQueue();
NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
@@ -116,6 +117,18 @@ public void testReconNodeDB() throws IOException {
reconNodeManager.addDatanodeCommand(datanodeDetails.getUuid(),
new ReregisterCommand());
+ // OperationalState sanity check
+ final DatanodeDetails dnDetails =
+ reconNodeManager.getNodeByUuid(datanodeDetails.getUuidString());
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ dnDetails.getPersistedOpState());
+ assertEquals(dnDetails.getPersistedOpState(),
+ reconNodeManager.getNodeStatus(dnDetails)
+ .getOperationalState());
+ assertEquals(dnDetails.getPersistedOpStateExpiryEpochSec(),
+ reconNodeManager.getNodeStatus(dnDetails)
+ .getOpStateExpiryEpochSeconds());
+
// Upon processing the heartbeat, the illegal command should be filtered out
List returnedCmds =
reconNodeManager.processHeartbeat(datanodeDetails,
@@ -124,6 +137,23 @@ public void testReconNodeDB() throws IOException {
assertEquals(SCMCommandProto.Type.reregisterCommand,
returnedCmds.get(0).getType());
+ // Now feed a DECOMMISSIONED heartbeat of the same DN
+ datanodeDetails.setPersistedOpState(
+ HddsProtos.NodeOperationalState.DECOMMISSIONED);
+ datanodeDetails.setPersistedOpStateExpiryEpochSec(12345L);
+ reconNodeManager.processHeartbeat(datanodeDetails,
+ defaultLayoutVersionProto());
+ // Check both persistedOpState and NodeStatus#operationalState
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED,
+ dnDetails.getPersistedOpState());
+ assertEquals(dnDetails.getPersistedOpState(),
+ reconNodeManager.getNodeStatus(dnDetails)
+ .getOperationalState());
+ assertEquals(12345L, dnDetails.getPersistedOpStateExpiryEpochSec());
+ assertEquals(dnDetails.getPersistedOpStateExpiryEpochSec(),
+ reconNodeManager.getNodeStatus(dnDetails)
+ .getOpStateExpiryEpochSeconds());
+
// Close the DB, and recreate the instance of Recon Node Manager.
eventQueue.close();
reconNodeManager.close();
@@ -135,4 +165,4 @@ public void testReconNodeDB() throws IOException {
assertNotNull(
reconNodeManager.getNodeByUuid(datanodeDetails.getUuidString()));
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCBatch.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCBatch.java
new file mode 100644
index 000000000000..b4875fc36968
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCBatch.java
@@ -0,0 +1,138 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.genesis;
+
+import java.nio.ByteBuffer;
+
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.util.NativeCRC32Wrapper;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Threads;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * Class to benchmark hadoop native CRC implementations in batch node.
+ *
+ * The hadoop native libraries must be available to run this test. libhadoop.so
+ * is not currently bundled with Ozone, so it needs to be obtained from a Hadoop
+ * build and the test needs to be executed on a compatible OS (ie Linux x86):
+ *
+ * ozone --jvmargs -Djava.library.path=/home/sodonnell/native genesis -b
+ * BenchMarkCRCBatch
+ */
+public class BenchMarkCRCBatch {
+
+ private static int dataSize = 64 * 1024 * 1024;
+
+ @State(Scope.Thread)
+ public static class BenchmarkState {
+
+ private final ByteBuffer data = ByteBuffer.allocate(dataSize);
+
+ @Param({"512", "1024", "2048", "4096", "32768", "1048576"})
+ private int checksumSize;
+
+ @Param({"nativeCRC32", "nativeCRC32C"})
+ private String crcImpl;
+
+ private byte[] checksumBuffer;
+ private int nativeChecksumType = 1;
+
+ public ByteBuffer data() {
+ return data;
+ }
+
+ public int checksumSize() {
+ return checksumSize;
+ }
+
+ public String crcImpl() {
+ return crcImpl;
+ }
+
+ @edu.umd.cs.findbugs.annotations.SuppressFBWarnings(
+ value="EI_EXPOSE_REP",
+ justification="The intent is to expose this variable")
+ public byte[] checksumBuffer() {
+ return checksumBuffer;
+ }
+
+ public int nativeChecksumType() {
+ return nativeChecksumType;
+ }
+
+ @Setup(Level.Trial)
+ public void setUp() {
+ switch (crcImpl) {
+ case "nativeCRC32":
+ if (NativeCRC32Wrapper.isAvailable()) {
+ nativeChecksumType = NativeCRC32Wrapper.CHECKSUM_CRC32;
+ checksumBuffer = new byte[4 * dataSize / checksumSize];
+ } else {
+ throw new RuntimeException("Native library is not available");
+ }
+ break;
+ case "nativeCRC32C":
+ if (NativeCRC32Wrapper.isAvailable()) {
+ nativeChecksumType = NativeCRC32Wrapper.CHECKSUM_CRC32C;
+ checksumBuffer = new byte[4 * dataSize / checksumSize];
+ } else {
+ throw new RuntimeException("Native library is not available");
+ }
+ break;
+ default:
+ }
+ data.put(RandomUtils.nextBytes(data.remaining()));
+ }
+ }
+
+ @Benchmark
+ @Threads(1)
+ @Warmup(iterations = 3, time = 1000, timeUnit = MILLISECONDS)
+ @Fork(value = 1, warmups = 0)
+ @Measurement(iterations = 5, time = 2000, timeUnit = MILLISECONDS)
+ @BenchmarkMode(Mode.Throughput)
+ public void runCRCNativeBatch(Blackhole blackhole, BenchmarkState state) {
+ if (state.crcImpl.equals("nativeCRC32")
+ || state.crcImpl.equals("nativeCRC32C")) {
+ NativeCRC32Wrapper.calculateChunkedSumsByteArray(
+ state.checksumSize, state.nativeChecksumType, state.checksumBuffer,
+ 0, state.data.array(), 0, state.data.capacity());
+ blackhole.consume(state.checksumBuffer);
+ } else {
+ throw new RuntimeException("Batch mode not available for "
+ + state.crcImpl);
+ }
+ }
+
+ public static void main(String[] args) throws Exception {
+ org.openjdk.jmh.Main.main(args);
+ }
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCStreaming.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCStreaming.java
new file mode 100644
index 000000000000..5dd5da8390ac
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkCRCStreaming.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.genesis;
+
+import java.nio.ByteBuffer;
+
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.ozone.common.ChecksumByteBuffer;
+import org.apache.hadoop.ozone.common.ChecksumByteBufferFactory;
+import org.apache.hadoop.ozone.common.ChecksumByteBufferImpl;
+import org.apache.hadoop.ozone.common.NativeCheckSumCRC32;
+import org.apache.hadoop.ozone.common.PureJavaCrc32ByteBuffer;
+import org.apache.hadoop.ozone.common.PureJavaCrc32CByteBuffer;
+import org.apache.hadoop.util.NativeCRC32Wrapper;
+import org.apache.hadoop.util.PureJavaCrc32;
+import org.apache.hadoop.util.PureJavaCrc32C;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.Threads;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+import java.util.zip.CRC32;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * Class to benchmark various CRC implementations. This can be executed via
+ *
+ * ozone genesis -b BenchMarkCRCStreaming
+ *
+ * However there are some points to keep in mind. java.util.zip.CRC32C is not
+ * available until Java 9, therefore if the JVM has a lower version than 9, that
+ * implementation will not be tested.
+ *
+ * The hadoop native libraries will only be tested if libhadoop.so is found on
+ * the "-Djava.library.path". libhadoop.so is not currently bundled with Ozone,
+ * so it needs to be obtained from a Hadoop build and the test needs to be
+ * executed on a compatible OS (ie Linux x86):
+ *
+ * ozone --jvmargs -Djava.library.path=/home/sodonnell/native genesis -b
+ * BenchMarkCRCStreaming
+ */
+public class BenchMarkCRCStreaming {
+
+ private static int dataSize = 64 * 1024 * 1024;
+
+ @State(Scope.Thread)
+ public static class BenchmarkState {
+
+ private final ByteBuffer data = ByteBuffer.allocate(dataSize);
+
+ @Param({"512", "1024", "2048", "4096", "32768", "1048576"})
+ private int checksumSize;
+
+ @Param({"pureCRC32", "pureCRC32C", "hadoopCRC32C", "hadoopCRC32",
+ "zipCRC32", "zipCRC32C", "nativeCRC32", "nativeCRC32C"})
+ private String crcImpl;
+
+ private ChecksumByteBuffer checksum;
+
+ public ChecksumByteBuffer checksum() {
+ return checksum;
+ }
+
+ public String crcImpl() {
+ return crcImpl;
+ }
+
+ public int checksumSize() {
+ return checksumSize;
+ }
+
+ @Setup(Level.Trial)
+ public void setUp() {
+ switch (crcImpl) {
+ case "pureCRC32":
+ checksum = new PureJavaCrc32ByteBuffer();
+ break;
+ case "pureCRC32C":
+ checksum = new PureJavaCrc32CByteBuffer();
+ break;
+ case "hadoopCRC32":
+ checksum = new ChecksumByteBufferImpl(new PureJavaCrc32());
+ break;
+ case "hadoopCRC32C":
+ checksum = new ChecksumByteBufferImpl(new PureJavaCrc32C());
+ break;
+ case "zipCRC32":
+ checksum = new ChecksumByteBufferImpl(new CRC32());
+ break;
+ case "zipCRC32C":
+ try {
+ checksum = new ChecksumByteBufferImpl(
+ ChecksumByteBufferFactory.Java9Crc32CFactory.createChecksum());
+ } catch (Throwable e) {
+ throw new RuntimeException("zipCRC32C is not available pre Java 9");
+ }
+ break;
+ case "nativeCRC32":
+ if (NativeCRC32Wrapper.isAvailable()) {
+ checksum = new ChecksumByteBufferImpl(new NativeCheckSumCRC32(
+ NativeCRC32Wrapper.CHECKSUM_CRC32, checksumSize));
+ } else {
+ throw new RuntimeException("Native library is not available");
+ }
+ break;
+ case "nativeCRC32C":
+ if (NativeCRC32Wrapper.isAvailable()) {
+ checksum = new ChecksumByteBufferImpl(new NativeCheckSumCRC32(
+ NativeCRC32Wrapper.CHECKSUM_CRC32C, checksumSize));
+ } else {
+ throw new RuntimeException("Native library is not available");
+ }
+ break;
+ default:
+ }
+ data.clear();
+ data.put(RandomUtils.nextBytes(data.remaining()));
+ }
+ }
+
+ @Benchmark
+ @Threads(1)
+ @Warmup(iterations = 3, time = 1000, timeUnit = MILLISECONDS)
+ @Fork(value = 1, warmups = 0)
+ @Measurement(iterations = 5, time = 2000, timeUnit = MILLISECONDS)
+ @BenchmarkMode(Mode.Throughput)
+ public void runCRC(Blackhole blackhole, BenchmarkState state) {
+ ByteBuffer data = state.data;
+ data.clear();
+ ChecksumByteBuffer csum = state.checksum;
+ int bytesPerCheckSum = state.checksumSize;
+
+ for (int i=0; i2.27
- 2.10.3
+ 2.12.11.2.0
diff --git a/tools/fault-injection-service/CMakeLists.txt b/tools/fault-injection-service/CMakeLists.txt
index ebb3cad45bac..8639237f484a 100644
--- a/tools/fault-injection-service/CMakeLists.txt
+++ b/tools/fault-injection-service/CMakeLists.txt
@@ -1,114 +1,115 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with this
-# work for additional information regarding copyright ownership. The ASF
-# licenses this file to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-#
-# cmake build file for C++ failure_injection_service.
-# Assumes protobuf and gRPC have been installed using cmake.
-
-cmake_minimum_required(VERSION 2.8)
-
-project(FailureInjectionService C CXX)
-
-set(BASE_DIR ".")
-set(FS_DIR "${BASE_DIR}/FileSystem")
-set(SRV_DIR "${BASE_DIR}/Service/cpp")
-
-set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -fpermissive -Wall")
-
-# This assumes that gRPC and all its dependencies are already installed
-# on this system, so they can be located by find_package().
-
-# Find Protobuf installation
-# Looks for protobuf-config.cmake file installed by Protobuf's cmake
-# installation.
-set(protobuf_MODULE_COMPATIBLE TRUE)
-find_package(Protobuf CONFIG REQUIRED)
-message(STATUS "Using protobuf ${protobuf_VERSION}")
-
-set(_PROTOBUF_LIBPROTOBUF protobuf::libprotobuf)
-set(_PROTOBUF_PROTOC $)
-
-# Find gRPC installation
-# Looks for gRPCConfig.cmake file installed by gRPC's cmake installation.
-find_package(gRPC CONFIG REQUIRED)
-message(STATUS "Using gRPC ${gRPC_VERSION}")
-
-set(_GRPC_GRPCPP_UNSECURE gRPC::grpc++_unsecure)
-set(_GRPC_CPP_PLUGIN_EXECUTABLE $)
-
-# Proto file
-get_filename_component(hw_proto
- "${BASE_DIR}/Service/protos/failure_injection_service.proto"
- ABSOLUTE)
-get_filename_component(hw_proto_path "${hw_proto}" PATH)
-
-# Generated sources
-set(hw_proto_srcs
- "${CMAKE_CURRENT_BINARY_DIR}/failure_injection_service.pb.cc")
-set(hw_proto_hdrs
- "${CMAKE_CURRENT_BINARY_DIR}/failure_injection_service.pb.h")
-set(hw_grpc_srcs
- "${CMAKE_CURRENT_BINARY_DIR}/failure_injection_service.grpc.pb.cc")
-set(hw_grpc_hdrs
- "${CMAKE_CURRENT_BINARY_DIR}/failure_injection_service.grpc.pb.h")
-add_custom_command(
- OUTPUT "${hw_proto_srcs}" "${hw_proto_hdrs}"
- "${hw_grpc_srcs}" "${hw_grpc_hdrs}"
- COMMAND ${_PROTOBUF_PROTOC}
- ARGS --grpc_out "${CMAKE_CURRENT_BINARY_DIR}"
- --cpp_out "${CMAKE_CURRENT_BINARY_DIR}"
- -I "${hw_proto_path}"
- --plugin=protoc-gen-grpc="${_GRPC_CPP_PLUGIN_EXECUTABLE}"
- "${hw_proto}"
- DEPENDS "${hw_proto}")
-
-# Include generated *.pb.h files
-include_directories("${CMAKE_CURRENT_BINARY_DIR}"
- "${FS_DIR}"
- "${SRV_DIR}")
-
-#add_compile_options("-fpermissive")
-
-# Build server
-add_executable(failure_injector_svc_server
- ${FS_DIR}/failure_injector_fs.cc ${FS_DIR}/failure_injector.cc
- ${SRV_DIR}/failure_injector_svc_server.cc ${SRV_DIR}/run_grpc_service.cc
- ${hw_proto_srcs}
- ${hw_grpc_srcs})
-target_link_libraries(failure_injector_svc_server
- ${_GRPC_GRPCPP_UNSECURE}
- fuse3
- ${_PROTOBUF_LIBPROTOBUF})
-
-# Build client
-add_executable(failure_injector_svc_client
- "${SRV_DIR}/failure_injector_svc_client.cc"
- ${hw_proto_srcs}
- ${hw_grpc_srcs})
-target_link_libraries(failure_injector_svc_client
- ${_GRPC_GRPCPP_UNSECURE}
- ${_PROTOBUF_LIBPROTOBUF})
-
-# Build unit tests
-set(CPP_UNIT_FUSE cpp_unit)
-foreach(_target
- TestFilePathFailures
- TestFailureInjector)
- add_executable(${_target}
- "${FS_DIR}/${CPP_UNIT_FUSE}/${_target}.cc"
- ${FS_DIR}/failure_injector.cc)
- target_link_libraries(${_target}
- cppunit)
-endforeach()
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with this
+# work for additional information regarding copyright ownership. The ASF
+# licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+# cmake build file for C++ failure_injection_service.
+# Assumes protobuf and gRPC have been installed using cmake.
+
+cmake_minimum_required(VERSION 2.8)
+
+project(FailureInjectionService C CXX)
+
+set(BASE_DIR ".")
+set(FS_DIR "${BASE_DIR}/FileSystem")
+set(SRV_DIR "${BASE_DIR}/Service/cpp")
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -fpermissive -Wall")
+
+# This assumes that gRPC and all its dependencies are already installed
+# on this system, so they can be located by find_package().
+
+# Find Protobuf installation
+# Looks for protobuf-config.cmake file installed by Protobuf's cmake
+# installation.
+set(protobuf_MODULE_COMPATIBLE TRUE)
+find_package(Protobuf CONFIG REQUIRED)
+message(STATUS "Using protobuf ${protobuf_VERSION}")
+
+set(_PROTOBUF_LIBPROTOBUF protobuf::libprotobuf)
+set(_PROTOBUF_PROTOC $)
+
+# Find gRPC installation
+# Looks for gRPCConfig.cmake file installed by gRPC's cmake installation.
+find_package(gRPC CONFIG REQUIRED)
+message(STATUS "Using gRPC ${gRPC_VERSION}")
+
+set(_GRPC_GRPCPP_UNSECURE gRPC::grpc++_unsecure)
+set(_GRPC_CPP_PLUGIN_EXECUTABLE $)
+
+# Proto file
+get_filename_component(hw_proto
+ "${BASE_DIR}/Service/protos/failure_injection_service.proto"
+ ABSOLUTE)
+get_filename_component(hw_proto_path "${hw_proto}" PATH)
+
+# Generated sources
+set(hw_proto_srcs
+ "${CMAKE_CURRENT_BINARY_DIR}/failure_injection_service.pb.cc")
+set(hw_proto_hdrs
+ "${CMAKE_CURRENT_BINARY_DIR}/failure_injection_service.pb.h")
+set(hw_grpc_srcs
+ "${CMAKE_CURRENT_BINARY_DIR}/failure_injection_service.grpc.pb.cc")
+set(hw_grpc_hdrs
+ "${CMAKE_CURRENT_BINARY_DIR}/failure_injection_service.grpc.pb.h")
+add_custom_command(
+ OUTPUT "${hw_proto_srcs}" "${hw_proto_hdrs}"
+ "${hw_grpc_srcs}" "${hw_grpc_hdrs}"
+ COMMAND ${_PROTOBUF_PROTOC}
+ ARGS --grpc_out "${CMAKE_CURRENT_BINARY_DIR}"
+ --cpp_out "${CMAKE_CURRENT_BINARY_DIR}"
+ -I "${hw_proto_path}"
+ --plugin=protoc-gen-grpc="${_GRPC_CPP_PLUGIN_EXECUTABLE}"
+ "${hw_proto}"
+ DEPENDS "${hw_proto}")
+
+# Include generated *.pb.h files
+include_directories("${CMAKE_CURRENT_BINARY_DIR}"
+ "${FS_DIR}"
+ "${SRV_DIR}")
+
+#add_compile_options("-fpermissive")
+
+# Build server
+find_package(Threads)
+add_executable(failure_injector_svc_server
+ ${FS_DIR}/failure_injector_fs.cc ${FS_DIR}/failure_injector.cc
+ ${SRV_DIR}/failure_injector_svc_server.cc ${SRV_DIR}/run_grpc_service.cc
+ ${hw_proto_srcs}
+ ${hw_grpc_srcs})
+target_link_libraries(failure_injector_svc_server
+ ${_GRPC_GRPCPP_UNSECURE}
+ fuse3
+ ${_PROTOBUF_LIBPROTOBUF})
+
+# Build client
+add_executable(failure_injector_svc_client
+ "${SRV_DIR}/failure_injector_svc_client.cc"
+ ${hw_proto_srcs}
+ ${hw_grpc_srcs})
+target_link_libraries(failure_injector_svc_client
+ ${_GRPC_GRPCPP_UNSECURE}
+ ${_PROTOBUF_LIBPROTOBUF})
+
+# Build unit tests
+set(CPP_UNIT_FUSE cpp_unit)
+foreach(_target
+ TestFilePathFailures
+ TestFailureInjector)
+ add_executable(${_target}
+ "${FS_DIR}/${CPP_UNIT_FUSE}/${_target}.cc"
+ ${FS_DIR}/failure_injector.cc)
+ target_link_libraries(${_target}
+ cppunit)
+endforeach()
diff --git a/tools/fault-injection-service/README.md b/tools/fault-injection-service/README.md
index 7fffd2349cd1..86952b6f720f 100644
--- a/tools/fault-injection-service/README.md
+++ b/tools/fault-injection-service/README.md
@@ -27,8 +27,43 @@ Dependencies
built/installed from sources
- cppunit & cppunit-devel
+Building Dependencies
+======================
+
+Building libfuse3 from the sources
+------------------------------------
+ - You can get it from https://github.com/libfuse/libfuse/releases/
+ * https://github.com/libfuse/libfuse/releases/tag/fuse-3.10.2
+ - follow the README.md for building libfuse
+ - you may need to get "meson-0.42.0" or above to build this.
+
+CMAKE
+======
+ - this will need cmake-3.14.0 or higher
+ - if required build from the sources. (tested with cmake-3.14.0 and
+ cmake-3.6.2)
+
+Building grpc from the sources
+------------------------------------
+ - https://grpc.io/docs/languages/cpp/quickstart/
+ - sudo apt install -y build-essential autoconf libtool pkg-config
+ - git clone --recurse-submodules -b v1.35.0 https://github.com/grpc/grpc
+ - Follow build instructions
+ - mkdir -p cmake/build
+ - pushd cmake/build
+ - cmake -DgRPC_INSTALL=ON -DgRPC_BUILD_TESTS=OFF\
+ -DCMAKE_INSTALL_PREFIX=$MY_INSTALL_DIR ../..
+ - make -j
+ - make install
+
+Finally
+-------
+ - Make sure all the dependencies are in $PATH.
+ - This build was last tested on (debian 4.18.5-1.el7.elrepo.x86_64
+ GNU/Linux) and Ubuntu 18.04.
+
Installation
-------------
+=============
mkdir Build
cd Build
do 'cmake ..'