diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 026c0a8cea4..9326564441d 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -2528,4 +2528,14 @@
filesystem semantics.
+
+  <property>
+    <name>ozone.om.layout.version</name>
+    <tag>OZONE, OM</tag>
+    <value>V0</value>
+    <description>Temporary workaround for OM upgrade and will be replaced once
+      upgrade HDDS-3698 story reaches consensus. Defaulting to 'V0' so that
+      existing unit test cases won't be affected. New OM version should be 'V1'.
+    </description>
+  </property>
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index f16679a681e..0c168153cfe 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -246,4 +246,12 @@ private OMConfigKeys() {
"ozone.om.enable.filesystem.paths";
public static final boolean OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT =
false;
+
+  // TODO: Temporary workaround for OM upgrade path and will be replaced once
+  // upgrade HDDS-3698 story reaches consensus. Defaulting to 'V0' so that
+  // existing unit test cases won't be affected. New OM version should be 'V1'.
+  // Config key selecting the OM metadata layout version ('V0' or 'V1').
+  public static final String OZONE_OM_LAYOUT_VERSION =
+      "ozone.om.layout.version";
+  // Legacy flat-key layout; mirrors the default in ozone-default.xml.
+  public static final String OZONE_OM_LAYOUT_VERSION_DEFAULT = "V0";
+  // New layout that stores directory entries in a separate directory table.
+  public static final String OZONE_OM_LAYOUT_VERSION_V1 = "V1";
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
new file mode 100644
index 00000000000..4c820479f24
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmDirectoryInfo.java
@@ -0,0 +1,266 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.helpers;
+
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+
+import java.util.*;
+
+/**
+ * This class represents the directory information by keeping each component
+ * in the user given path and a pointer to its parent directory element in the
+ * path. Also, it stores directory node related metadata details.
+ */
+public class OmDirectoryInfo extends WithObjectID {
+  private long parentObjectID; // pointer to parent directory
+
+  private String name; // directory name
+
+  private long creationTime;
+  private long modificationTime;
+
+  private List<OzoneAcl> acls;
+
+  public OmDirectoryInfo(Builder builder) {
+    this.name = builder.name;
+    this.acls = builder.acls;
+    this.metadata = builder.metadata;
+    this.objectID = builder.objectID;
+    this.updateID = builder.updateID;
+    this.parentObjectID = builder.parentObjectID;
+    this.creationTime = builder.creationTime;
+    this.modificationTime = builder.modificationTime;
+  }
+
+  /**
+   * Returns new builder class that builds a OmDirectoryInfo.
+   *
+   * @return Builder
+   */
+  public static OmDirectoryInfo.Builder newBuilder() {
+    return new OmDirectoryInfo.Builder();
+  }
+
+  /**
+   * Builder for Directory Info.
+   */
+  public static class Builder {
+    private long parentObjectID; // pointer to parent directory
+
+    private long objectID;
+    private long updateID;
+
+    private String name;
+
+    private long creationTime;
+    private long modificationTime;
+
+    private List<OzoneAcl> acls;
+    private Map<String, String> metadata;
+
+    public Builder() {
+      // Default values
+      this.acls = new LinkedList<>();
+      this.metadata = new HashMap<>();
+    }
+
+    public Builder setParentObjectID(long parentObjectId) {
+      this.parentObjectID = parentObjectId;
+      return this;
+    }
+
+    public Builder setObjectID(long objectId) {
+      this.objectID = objectId;
+      return this;
+    }
+
+    public Builder setUpdateID(long updateId) {
+      this.updateID = updateId;
+      return this;
+    }
+
+    public Builder setName(String dirName) {
+      this.name = dirName;
+      return this;
+    }
+
+    public Builder setCreationTime(long newCreationTime) {
+      this.creationTime = newCreationTime;
+      return this;
+    }
+
+    public Builder setModificationTime(long newModificationTime) {
+      this.modificationTime = newModificationTime;
+      return this;
+    }
+
+    // NOTE: appends to any previously added ACLs rather than replacing them.
+    public Builder setAcls(List<OzoneAcl> listOfAcls) {
+      if (listOfAcls != null) {
+        this.acls.addAll(listOfAcls);
+      }
+      return this;
+    }
+
+    public Builder addAcl(OzoneAcl ozoneAcl) {
+      if (ozoneAcl != null) {
+        this.acls.add(ozoneAcl);
+      }
+      return this;
+    }
+
+    public Builder addMetadata(String key, String value) {
+      metadata.put(key, value);
+      return this;
+    }
+
+    public Builder addAllMetadata(Map<String, String> additionalMetadata) {
+      if (additionalMetadata != null) {
+        metadata.putAll(additionalMetadata);
+      }
+      return this;
+    }
+
+    public OmDirectoryInfo build() {
+      return new OmDirectoryInfo(this);
+    }
+  }
+
+  @Override
+  public String toString() {
+    return getPath() + ":" + getObjectID();
+  }
+
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
+  /**
+   * Returns the directory-table key of this entry: "parentObjectID/name".
+   */
+  public String getPath() {
+    return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getName();
+  }
+
+  public String getName() {
+    return name;
+  }
+
+  public long getCreationTime() {
+    return creationTime;
+  }
+
+  public long getModificationTime() {
+    return modificationTime;
+  }
+
+  public List<OzoneAcl> getAcls() {
+    return acls;
+  }
+
+  /**
+   * Creates DirectoryInfo protobuf from OmDirectoryInfo.
+   */
+  public OzoneManagerProtocolProtos.DirectoryInfo getProtobuf() {
+    OzoneManagerProtocolProtos.DirectoryInfo.Builder pib =
+        OzoneManagerProtocolProtos.DirectoryInfo.newBuilder().setName(name)
+            .setCreationTime(creationTime)
+            .setModificationTime(modificationTime)
+            .addAllMetadata(KeyValueUtil.toProtobuf(metadata))
+            .setObjectID(objectID)
+            .setUpdateID(updateID)
+            .setParentID(parentObjectID);
+    if (acls != null) {
+      pib.addAllAcls(OzoneAclUtil.toProtobuf(acls));
+    }
+    return pib.build();
+  }
+
+  /**
+   * Parses DirectoryInfo protobuf and creates OmDirectoryInfo.
+   * @param dirInfo protobuf message to convert
+   * @return instance of OmDirectoryInfo
+   */
+  public static OmDirectoryInfo getFromProtobuf(
+      OzoneManagerProtocolProtos.DirectoryInfo dirInfo) {
+    OmDirectoryInfo.Builder opib = OmDirectoryInfo.newBuilder()
+        .setName(dirInfo.getName())
+        .setCreationTime(dirInfo.getCreationTime())
+        .setModificationTime(dirInfo.getModificationTime())
+        .setAcls(OzoneAclUtil.fromProtobuf(dirInfo.getAclsList()));
+    if (dirInfo.getMetadataList() != null) {
+      opib.addAllMetadata(KeyValueUtil
+          .getFromProtobuf(dirInfo.getMetadataList()));
+    }
+    if (dirInfo.hasObjectID()) {
+      opib.setObjectID(dirInfo.getObjectID());
+    }
+    if (dirInfo.hasParentID()) {
+      opib.setParentObjectID(dirInfo.getParentID());
+    }
+    if (dirInfo.hasUpdateID()) {
+      opib.setUpdateID(dirInfo.getUpdateID());
+    }
+    return opib.build();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    OmDirectoryInfo omDirInfo = (OmDirectoryInfo) o;
+    return creationTime == omDirInfo.creationTime &&
+        modificationTime == omDirInfo.modificationTime &&
+        Objects.equals(name, omDirInfo.name) &&
+        Objects.equals(metadata, omDirInfo.metadata) &&
+        Objects.equals(acls, omDirInfo.acls) &&
+        objectID == omDirInfo.objectID &&
+        updateID == omDirInfo.updateID &&
+        parentObjectID == omDirInfo.parentObjectID;
+  }
+
+  @Override
+  public int hashCode() {
+    // Uses a subset of the equals() fields; consistent with equals() since
+    // equal objects must agree on objectID, parentObjectID and name.
+    return Objects.hash(objectID, parentObjectID, name);
+  }
+
+  /**
+   * Return a new copy of the object.
+   */
+  public OmDirectoryInfo copyObject() {
+    OmDirectoryInfo.Builder builder = new Builder()
+        .setName(name)
+        .setCreationTime(creationTime)
+        .setModificationTime(modificationTime)
+        .setParentObjectID(parentObjectID)
+        .setObjectID(objectID)
+        .setUpdateID(updateID);
+
+    // deep-copy ACLs so mutating the copy's bit sets cannot affect this one
+    acls.forEach(acl -> builder.addAcl(new OzoneAcl(acl.getType(),
+        acl.getName(), (BitSet) acl.getAclBitSet().clone(),
+        acl.getAclScope())));
+
+    if (metadata != null) {
+      metadata.forEach(builder::addMetadata);
+    }
+
+    return builder.build();
+  }
+}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
index d0e8bee5234..782b6f0af7a 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java
@@ -25,9 +25,11 @@
import java.util.Map;
import java.util.Objects;
+import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocationList;
import org.apache.hadoop.ozone.protocolPB.OMPBHelper;
@@ -52,6 +54,8 @@ public final class OmKeyInfo extends WithObjectID {
private HddsProtos.ReplicationType type;
private HddsProtos.ReplicationFactor factor;
private FileEncryptionInfo encInfo;
+ private String fileName; // leaf node name
+ private long parentObjectID; // pointer to parent directory
/**
* ACL Information.
@@ -94,6 +98,22 @@ public final class OmKeyInfo extends WithObjectID {
this.updateID = updateID;
}
+  /**
+   * Creates an OmKeyInfo that also carries the file-table fields: the leaf
+   * file name and the objectID of the parent directory. All common key
+   * fields are delegated to the existing constructor.
+   */
+  @SuppressWarnings("parameternumber")
+  OmKeyInfo(String volumeName, String bucketName, String keyName,
+      String fileName, List<OmKeyLocationInfoGroup> versions,
+      long dataSize, long creationTime, long modificationTime,
+      HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor factor,
+      Map<String, String> metadata,
+      FileEncryptionInfo encInfo, List<OzoneAcl> acls,
+      long parentObjectID, long objectID, long updateID) {
+    this(volumeName, bucketName, keyName, versions, dataSize,
+        creationTime, modificationTime, type, factor, metadata, encInfo,
+        acls, objectID, updateID);
+    this.fileName = fileName;
+    this.parentObjectID = parentObjectID;
+  }
+
public String getVolumeName() {
return volumeName;
}
@@ -126,6 +146,22 @@ public void setDataSize(long size) {
this.dataSize = size;
}
+  /** Sets the leaf file name (last element of the key path). */
+  public void setFileName(String fileName) {
+    this.fileName = fileName;
+  }
+
+  /** Returns the leaf file name; not persisted to DB. */
+  public String getFileName() {
+    return fileName;
+  }
+
+  /** Returns the objectID of this key's parent directory. */
+  public long getParentObjectID() {
+    return parentObjectID;
+  }
+
+  /** Sets the objectID of this key's parent directory. */
+  public void setParentObjectID(long parentObjectID) {
+    this.parentObjectID = parentObjectID;
+  }
+
public synchronized OmKeyLocationInfoGroup getLatestVersionLocations() {
return keyLocationVersions.size() == 0? null :
keyLocationVersions.get(keyLocationVersions.size() - 1);
@@ -267,6 +303,9 @@ public static class Builder {
private List acls;
private long objectID;
private long updateID;
+ // not persisted to DB. FileName will be the last element in path keyName.
+ private String fileName;
+ private long parentObjectID;
public Builder() {
this.metadata = new HashMap<>();
@@ -369,11 +408,22 @@ public Builder setUpdateID(long id) {
return this;
}
+    /** Sets the leaf file name (not persisted; derived from keyName). */
+    public Builder setFileName(String keyFileName) {
+      this.fileName = keyFileName;
+      return this;
+    }
+
+    /** Sets the objectID of the parent directory. */
+    public Builder setParentObjectID(long parentID) {
+      this.parentObjectID = parentID;
+      return this;
+    }
+
public OmKeyInfo build() {
return new OmKeyInfo(
- volumeName, bucketName, keyName, omKeyLocationInfoGroups,
- dataSize, creationTime, modificationTime, type, factor, metadata,
- encInfo, acls, objectID, updateID);
+ volumeName, bucketName, keyName, fileName,
+ omKeyLocationInfoGroups, dataSize, creationTime,
+ modificationTime, type, factor, metadata, encInfo, acls,
+ parentObjectID, objectID, updateID);
}
}
@@ -385,12 +435,33 @@ public KeyInfo getProtobuf() {
return getProtobuf(false);
}
+ /**
+ * For network transmit.
+ *
+ * @param fullKeyName the user given full key name
+ * @return key info with the user given full key name
+ */
+ public KeyInfo getProtobuf(String fullKeyName) {
+ return getProtobuf(false, fullKeyName);
+ }
+
/**
*
* @param ignorePipeline true for persist to DB, false for network transmit.
* @return
*/
public KeyInfo getProtobuf(boolean ignorePipeline) {
+ return getProtobuf(ignorePipeline, null);
+ }
+
+ /**
+ * Gets KeyInfo with the user given key name.
+ *
+ * @param ignorePipeline ignore pipeline flag
+ * @param fullKeyName user given key name
+ * @return key info object
+ */
+ private KeyInfo getProtobuf(boolean ignorePipeline, String fullKeyName) {
long latestVersion = keyLocationVersions.size() == 0 ? -1 :
keyLocationVersions.get(keyLocationVersions.size() - 1).getVersion();
@@ -402,7 +473,6 @@ public KeyInfo getProtobuf(boolean ignorePipeline) {
KeyInfo.Builder kb = KeyInfo.newBuilder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
- .setKeyName(keyName)
.setDataSize(dataSize)
.setFactor(factor)
.setType(type)
@@ -413,7 +483,13 @@ public KeyInfo getProtobuf(boolean ignorePipeline) {
.addAllMetadata(KeyValueUtil.toProtobuf(metadata))
.addAllAcls(OzoneAclUtil.toProtobuf(acls))
.setObjectID(objectID)
- .setUpdateID(updateID);
+ .setUpdateID(updateID)
+ .setParentID(parentObjectID);
+ if (StringUtils.isNotBlank(fullKeyName)) {
+ kb.setKeyName(fullKeyName);
+ } else {
+ kb.setKeyName(keyName);
+ }
if (encInfo != null) {
kb.setFileEncryptionInfo(OMPBHelper.convert(encInfo));
}
@@ -451,6 +527,11 @@ public static OmKeyInfo getFromProtobuf(KeyInfo keyInfo) {
if (keyInfo.hasUpdateID()) {
builder.setUpdateID(keyInfo.getUpdateID());
}
+ if (keyInfo.hasParentID()) {
+ builder.setParentObjectID(keyInfo.getParentID());
+ }
+ // not persisted to DB. FileName will be filtered out from keyName
+ builder.setFileName(OzoneFSUtils.getFileName(keyInfo.getKeyName()));
return builder.build();
}
@@ -464,6 +545,8 @@ public String getObjectInfo() {
", creationTime='" + creationTime + '\'' +
", type='" + type + '\'' +
", factor='" + factor + '\'' +
+ ", objectID='" + objectID + '\'' +
+ ", parentID='" + parentObjectID + '\'' +
'}';
}
@@ -489,12 +572,13 @@ public boolean equals(Object o) {
Objects.equals(metadata, omKeyInfo.metadata) &&
Objects.equals(acls, omKeyInfo.acls) &&
objectID == omKeyInfo.objectID &&
- updateID == omKeyInfo.updateID;
+ updateID == omKeyInfo.updateID &&
+ parentObjectID == omKeyInfo.parentObjectID;
}
@Override
public int hashCode() {
- return Objects.hash(volumeName, bucketName, keyName);
+ return Objects.hash(volumeName, bucketName, keyName, parentObjectID);
}
/**
@@ -511,8 +595,10 @@ public OmKeyInfo copyObject() {
.setReplicationType(type)
.setReplicationFactor(factor)
.setFileEncryptionInfo(encInfo)
- .setObjectID(objectID).setUpdateID(updateID);
-
+ .setObjectID(objectID)
+ .setUpdateID(updateID)
+ .setParentObjectID(parentObjectID)
+ .setFileName(fileName);
keyLocationVersions.forEach(keyLocationVersion ->
builder.addOmKeyLocationInfoGroup(
@@ -540,4 +626,11 @@ public OmKeyInfo copyObject() {
public void clearFileEncryptionInfo() {
this.encInfo = null;
}
+
+  /**
+   * Returns the file-table key for this key: "parentObjectID/fileName".
+   * Falls back to the full keyName when no file name is set (legacy
+   * entries whose fileName was never populated).
+   */
+  public String getPath() {
+    if (StringUtils.isBlank(getFileName())) {
+      return getKeyName();
+    }
+    return getParentObjectID() + OzoneConsts.OM_KEY_PREFIX + getFileName();
+  }
}
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
index d1491ed6c50..a046f1d101d 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java
@@ -20,6 +20,7 @@
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.StringUtils;
+import javax.annotation.Nonnull;
import java.nio.file.Paths;
import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER;
@@ -116,4 +117,60 @@ public static boolean isValidName(String src) {
}
return true;
}
+
+  /**
+   * Returns the leaf node name from the given absolute key path. For
+   * example, for the key path '/a/b/c/d/e/file1' it returns 'file1'.
+   *
+   * @param keyName full key path
+   * @return the last path component, or {@code keyName} itself when the
+   *         path has no file-name component (e.g. the root path)
+   */
+  public static String getFileName(@Nonnull String keyName) {
+    java.nio.file.Path fileName = Paths.get(keyName).getFileName();
+    if (fileName != null) {
+      return fileName.toString();
+    }
+    // getFileName() is null only for paths without a final element (root)
+    return keyName;
+  }
+
+  /**
+   * Returns the parent directory path of the given absolute key path. For
+   * example, for the key path '/a/b/c/d/e/file1' it returns '/a/b/c/d/e'.
+   *
+   * @param keyName key name
+   * @return the parent path, or {@code keyName} itself when the path has
+   *         no parent component (e.g. the root path)
+   */
+  public static String getParentDir(@Nonnull String keyName) {
+    // local renamed from 'fileName': it holds a parent path, not a file name
+    java.nio.file.Path parentDir = Paths.get(keyName).getParent();
+    if (parentDir != null) {
+      return parentDir.toString();
+    }
+    // getParent() is null when there is no parent component (e.g. root)
+    return keyName;
+  }
+
+  /**
+   * Appends the given file name to the given key path, separated by the
+   * Ozone URI delimiter.
+   *
+   * @param keyName key name
+   * @param fileName file name
+   * @return full path
+   */
+  public static String appendFileNameToKeyPath(String keyName,
+                                               String fileName) {
+    StringBuilder fullPath = new StringBuilder(keyName);
+    return fullPath.append(OZONE_URI_DELIMITER).append(fileName).toString();
+  }
+
+  /**
+   * Returns the number of path components in the given keyName.
+   *
+   * @param keyName keyname
+   * @return path components count
+   */
+  public static int getFileCount(String keyName) {
+    // NOTE(review): Paths.get("").getNameCount() returns 1 (a single empty
+    // name element), so an empty keyName counts as one component — confirm
+    // callers expect that.
+    java.nio.file.Path keyPath = Paths.get(keyName);
+    return keyPath.getNameCount();
+  }
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
new file mode 100644
index 00000000000..87e9f09bae3
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneDirectory.java
@@ -0,0 +1,207 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+import static org.junit.Assert.fail;
+
+/**
+ * Test verifies the entries and operations in directory table.
+ */
+public class TestOzoneDirectory {
+
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestOzoneDirectory.class);
+
+  private MiniOzoneCluster cluster;
+  private FileSystem fs;
+  private String volumeName;
+  private String bucketName;
+
+  @Test(timeout = 300_000)
+  public void testMultiLevelDirs() throws Exception {
+    setupOzoneFileSystem();
+    // Op 1. create dir -> /d1/d2/d3/d4/
+    // Op 2. create dir -> /d1/d2/d3/d4/d5
+    // Op 3. create dir -> /d1/d2/d3/d4/d6
+    Path parent = new Path("/d1/d2/d3/d4/");
+    fs.mkdirs(parent);
+
+    OMMetadataManager omMgr = cluster.getOzoneManager().getMetadataManager();
+    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(
+        omMgr.getBucketKey(volumeName, bucketName));
+    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
+
+    ArrayList<String> dirKeys = new ArrayList<>();
+    long d1ObjectID = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1",
+        dirKeys, omMgr);
+    long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys,
+        omMgr);
+    long d3ObjectID = verifyDirKey(d2ObjectID, "d3", "/d1/d2/d3",
+        dirKeys, omMgr);
+    long d4ObjectID = verifyDirKey(d3ObjectID, "d4", "/d1/d2/d3/d4",
+        dirKeys, omMgr);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+        4, cluster.getOzoneManager().getMetrics().getNumKeys());
+
+    // verify entries in directory table
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+        iterator = omMgr.getDirectoryTable().iterator();
+    iterator.seekToFirst();
+    int count = dirKeys.size();
+    Assert.assertEquals("Unexpected directory table entries!", 4, count);
+    while (iterator.hasNext()) {
+      count--;
+      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
+      verifyKeyFormat(value.getKey(), dirKeys);
+    }
+    Assert.assertEquals("Unexpected directory table entries!", 0, count);
+
+    // verify entries in key table
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+        keyTableItr = omMgr.getKeyTable().iterator();
+    while (keyTableItr.hasNext()) {
+      fail("Shouldn't add any entries in KeyTable!");
+    }
+
+    // create sub-dirs under same parent
+    Path subDir5 = new Path("/d1/d2/d3/d4/d5");
+    fs.mkdirs(subDir5);
+    Path subDir6 = new Path("/d1/d2/d3/d4/d6");
+    fs.mkdirs(subDir6);
+    long d5ObjectID = verifyDirKey(d4ObjectID, "d5",
+        "/d1/d2/d3/d4/d5", dirKeys, omMgr);
+    long d6ObjectID = verifyDirKey(d4ObjectID, "d6",
+        "/d1/d2/d3/d4/d6", dirKeys, omMgr);
+    Assert.assertTrue("Wrong objectIds for sub-dirs[" + d5ObjectID +
+        "/d5, " + d6ObjectID + "/d6] of same parent!",
+        d5ObjectID != d6ObjectID);
+
+    Assert.assertEquals("Wrong OM numKeys metrics",
+        6, cluster.getOzoneManager().getMetrics().getNumKeys());
+  }
+
+  /**
+   * Verify key name format and the DB key existence in the expected dirKeys
+   * list.
+   *
+   * @param key table keyName
+   * @param dirKeys expected keyName
+   */
+  private void verifyKeyFormat(String key, ArrayList<String> dirKeys) {
+    String[] keyParts = StringUtils.split(key,
+        OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("Invalid KeyName", 2, keyParts.length);
+    boolean removed = dirKeys.remove(key);
+    Assert.assertTrue("Key:" + key + " doesn't exists in directory table!",
+        removed);
+  }
+
+  long verifyDirKey(long parentId, String dirKey, String absolutePath,
+      ArrayList<String> dirKeys, OMMetadataManager omMgr)
+      throws Exception {
+    // use OM_KEY_PREFIX (not a hard-coded "/") for consistency with
+    // verifyKeyFormat and the directory-table key format
+    String dbKey = parentId + OzoneConsts.OM_KEY_PREFIX + dirKey;
+    dirKeys.add(dbKey);
+    OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
+    Assert.assertNotNull("Failed to find " + absolutePath +
+        " using dbKey: " + dbKey, dirInfo);
+    Assert.assertEquals("Parent Id mismatches", parentId,
+        dirInfo.getParentObjectID());
+    Assert.assertEquals("Mismatches directory name", dirKey,
+        dirInfo.getName());
+    Assert.assertTrue("Mismatches directory creation time param",
+        dirInfo.getCreationTime() > 0);
+    Assert.assertEquals("Mismatches directory modification time param",
+        dirInfo.getCreationTime(), dirInfo.getModificationTime());
+    Assert.assertEquals("Wrong representation!",
+        dbKey + ":" + dirInfo.getObjectID(), dirInfo.toString());
+    return dirInfo.getObjectID();
+  }
+
+  private void setupOzoneFileSystem()
+      throws IOException, TimeoutException, InterruptedException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    // enable the new layout so that directory table entries are created
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
+        OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+        OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+        bucket.getVolumeName());
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+    fs = FileSystem.get(conf);
+  }
+
+  @After
+  public void tearDown() {
+    IOUtils.closeQuietly(fs);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
new file mode 100644
index 00000000000..d097268aea5
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileOps.java
@@ -0,0 +1,231 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.util.StringUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.concurrent.TimeoutException;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
+
+/**
+ * Test verifies the entries and operations in file table, open file table etc.
+ */
+public class TestOzoneFileOps {
+
+  @Rule
+  public Timeout timeout = new Timeout(300000);
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(TestOzoneFileOps.class);
+
+  private MiniOzoneCluster cluster;
+  private FileSystem fs;
+  private String volumeName;
+  private String bucketName;
+
+  @Before
+  public void setupOzoneFileSystem()
+      throws IOException, TimeoutException, InterruptedException {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    // enable the new layout so that file/directory tables are used
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
+        OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, false);
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+    // create a volume and a bucket to be used by OzoneFileSystem
+    OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(cluster);
+    volumeName = bucket.getVolumeName();
+    bucketName = bucket.getName();
+
+    String rootPath = String.format("%s://%s.%s/",
+        OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
+        bucket.getVolumeName());
+
+    // Set the fs.defaultFS and start the filesystem
+    conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
+    // Set the number of keys to be processed during batch operate.
+    conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
+    fs = FileSystem.get(conf);
+  }
+
+  @After
+  public void tearDown() {
+    IOUtils.closeQuietly(fs);
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test(timeout = 300_000)
+  public void testCreateFile() throws Exception {
+    // Op 1. create file -> /d1/d2/file1 (implicitly creates /d1 and /d1/d2)
+    Path parent = new Path("/d1/d2/");
+    Path file = new Path(parent, "file1");
+    FSDataOutputStream outputStream = fs.create(file);
+
+    OMMetadataManager omMgr = cluster.getOzoneManager().getMetadataManager();
+    OmBucketInfo omBucketInfo = omMgr.getBucketTable().get(
+        omMgr.getBucketKey(volumeName, bucketName));
+    Assert.assertNotNull("Failed to find bucketInfo", omBucketInfo);
+
+    ArrayList<String> dirKeys = new ArrayList<>();
+    long d1ObjectID = verifyDirKey(omBucketInfo.getObjectID(), "d1", "/d1",
+        dirKeys, omMgr);
+    long d2ObjectID = verifyDirKey(d1ObjectID, "d2", "/d1/d2", dirKeys,
+        omMgr);
+    String openFileKey = d2ObjectID + OzoneConsts.OM_KEY_PREFIX
+        + file.getName();
+
+    // verify entries in directory table
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+        iterator = omMgr.getDirectoryTable().iterator();
+    iterator.seekToFirst();
+    int count = dirKeys.size();
+    Assert.assertEquals("Unexpected directory table entries!", 2, count);
+    while (iterator.hasNext()) {
+      count--;
+      Table.KeyValue<String, OmDirectoryInfo> value = iterator.next();
+      verifyKeyFormat(value.getKey(), dirKeys);
+    }
+    Assert.assertEquals("Unexpected directory table entries!", 0, count);
+
+    // verify entries in open key table; count is 0 here and is reused as
+    // the open-key counter
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+        keysItr = omMgr.getOpenKeyTable().iterator();
+    keysItr.seekToFirst();
+
+    while (keysItr.hasNext()) {
+      count++;
+      Table.KeyValue<String, OmKeyInfo> value = keysItr.next();
+      verifyOpenKeyFormat(value.getKey(), openFileKey);
+      verifyOMFileInfoFormat(value.getValue(), file.getName(), d2ObjectID);
+    }
+    Assert.assertEquals("Unexpected file table entries!", 1, count);
+
+    // trigger CommitKeyRequest
+    outputStream.close();
+
+    Assert.assertTrue("Failed to commit the open file:" + openFileKey,
+        omMgr.getOpenKeyTable().isEmpty());
+
+    OmKeyInfo omKeyInfo = omMgr.getKeyTable().get(openFileKey);
+    Assert.assertNotNull("Invalid Key!", omKeyInfo);
+    verifyOMFileInfoFormat(omKeyInfo, file.getName(), d2ObjectID);
+  }
+
+  private void verifyOMFileInfoFormat(OmKeyInfo omKeyInfo, String fileName,
+      long parentID) {
+    Assert.assertEquals("Wrong keyName", fileName,
+        omKeyInfo.getKeyName());
+    Assert.assertEquals("Wrong parentID", parentID,
+        omKeyInfo.getParentObjectID());
+    String dbKey = parentID + OzoneConsts.OM_KEY_PREFIX + fileName;
+    Assert.assertEquals("Wrong path format", dbKey,
+        omKeyInfo.getPath());
+  }
+
+  /**
+   * Verify key name format and the DB key existence in the expected dirKeys
+   * list.
+   *
+   * @param key table keyName
+   * @param dirKeys expected keyName
+   */
+  private void verifyKeyFormat(String key, ArrayList<String> dirKeys) {
+    String[] keyParts = StringUtils.split(key,
+        OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("Invalid KeyName", 2, keyParts.length);
+    boolean removed = dirKeys.remove(key);
+    Assert.assertTrue("Key:" + key + " doesn't exists in directory table!",
+        removed);
+  }
+
+  /**
+   * Verify key name format and the DB key existence in the expected
+   * openFileKeys list.
+   *
+   * @param key table keyName
+   * @param openFileKey expected keyName
+   */
+  private void verifyOpenKeyFormat(String key, String openFileKey) {
+    String[] keyParts = StringUtils.split(key,
+        OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("Invalid KeyName:" + key, 3, keyParts.length);
+    String[] expectedOpenFileParts = StringUtils.split(openFileKey,
+        OzoneConsts.OM_KEY_PREFIX.charAt(0));
+    Assert.assertEquals("ParentId/Key:" + expectedOpenFileParts[0]
+            + " doesn't exists in openFileTable!",
+        expectedOpenFileParts[0] + OzoneConsts.OM_KEY_PREFIX
+            + expectedOpenFileParts[1],
+        keyParts[0] + OzoneConsts.OM_KEY_PREFIX + keyParts[1]);
+  }
+
+  long verifyDirKey(long parentId, String dirKey, String absolutePath,
+      ArrayList<String> dirKeys, OMMetadataManager omMgr)
+      throws Exception {
+    String dbKey = parentId + OzoneConsts.OM_KEY_PREFIX + dirKey;
+    dirKeys.add(dbKey);
+    OmDirectoryInfo dirInfo = omMgr.getDirectoryTable().get(dbKey);
+    Assert.assertNotNull("Failed to find " + absolutePath +
+        " using dbKey: " + dbKey, dirInfo);
+    Assert.assertEquals("Parent Id mismatches", parentId,
+        dirInfo.getParentObjectID());
+    Assert.assertEquals("Mismatches directory name", dirKey,
+        dirInfo.getName());
+    Assert.assertTrue("Mismatches directory creation time param",
+        dirInfo.getCreationTime() > 0);
+    Assert.assertEquals("Mismatches directory modification time param",
+        dirInfo.getCreationTime(), dirInfo.getModificationTime());
+    return dirInfo.getObjectID();
+  }
+
+}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 46c01153947..52ad09eebce 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -64,6 +64,7 @@
import static org.junit.Assert.fail;
import org.apache.hadoop.test.LambdaTestUtils;
+import org.jetbrains.annotations.NotNull;
import org.junit.After;
import org.junit.Assert;
import org.junit.Rule;
@@ -100,14 +101,19 @@ public TestOzoneFileSystem(boolean setDefaultFs) {
private static final Logger LOG =
LoggerFactory.getLogger(TestOzoneFileSystem.class);
- private boolean enabledFileSystemPaths;
+ @SuppressWarnings("checkstyle:VisibilityModifier")
+ protected boolean enabledFileSystemPaths;
- private MiniOzoneCluster cluster;
- private FileSystem fs;
- private OzoneFileSystem o3fs;
+ @SuppressWarnings("checkstyle:VisibilityModifier")
+ protected MiniOzoneCluster cluster;
+ @SuppressWarnings("checkstyle:VisibilityModifier")
+ protected FileSystem fs;
+ @SuppressWarnings("checkstyle:VisibilityModifier")
+ protected OzoneFileSystem o3fs;
private String volumeName;
private String bucketName;
- private int rootItemCount;
+ @SuppressWarnings("checkstyle:VisibilityModifier")
+ protected int rootItemCount;
private Trash trash;
public void testCreateFileShouldCheckExistenceOfDirWithSameName()
@@ -249,9 +255,9 @@ public void tearDown() {
}
}
- private void setupOzoneFileSystem()
+ protected void setupOzoneFileSystem()
throws IOException, TimeoutException, InterruptedException {
- OzoneConfiguration conf = new OzoneConfiguration();
+ OzoneConfiguration conf = getOzoneConfig();
conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
enabledFileSystemPaths);
@@ -276,7 +282,7 @@ private void setupOzoneFileSystem()
trash = new Trash(conf);
}
- private void testOzoneFsServiceLoader() throws IOException {
+ protected void testOzoneFsServiceLoader() throws IOException {
assertEquals(
FileSystem.getFileSystemClass(OzoneConsts.OZONE_URI_SCHEME, null),
OzoneFileSystem.class);
@@ -615,7 +621,7 @@ public void testNonExplicitlyCreatedPathExistsAfterItsLeafsWereRemoved()
interimPath.getName(), fileStatus.getPath().getName());
}
- private void testRenameDir() throws Exception {
+ protected void testRenameDir() throws Exception {
final String dir = "/root_dir/dir1";
final Path source = new Path(fs.getUri().toString() + dir);
final Path dest = new Path(source.toString() + ".renamed");
@@ -634,10 +640,6 @@ private void testRenameDir() throws Exception {
// Test if one path belongs to other FileSystem.
LambdaTestUtils.intercept(IllegalArgumentException.class, "Wrong FS",
() -> fs.rename(new Path(fs.getUri().toString() + "fake" + dir), dest));
-
- // Renaming to same path when src is specified with scheme.
- assertTrue("Renaming to same path should be success.",
- fs.rename(source, new Path(dir)));
}
private OzoneKeyDetails getKey(Path keyPath, boolean isDirectory)
throws IOException {
@@ -777,4 +779,14 @@ public void testRenameToTrashDisabled() throws IOException {
// Cleanup
o3fs.delete(trashRoot, true);
}
+
+ @NotNull
+ protected OzoneConfiguration getOzoneConfig() {
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+ conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+ enabledFileSystemPaths);
+ return conf;
+ }
+
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
new file mode 100644
index 00000000000..aaa533a858a
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemV1.java
@@ -0,0 +1,221 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.Map;
+
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Ozone file system tests that are not covered by contract tests,
+ * layout version V1.
+ *
+ * Note: When adding new test(s), please append it in testFileSystem() to
+ * avoid test run time regression.
+ *
+ * TODO: This class will be replaced once HDDS-4332 is committed to the branch.
+ */
+@RunWith(Parameterized.class)
+public class TestOzoneFileSystemV1 extends TestOzoneFileSystem {
+
+ public TestOzoneFileSystemV1(boolean setDefaultFs) {
+ super(setDefaultFs);
+ }
+
+ /**
+ * Set a timeout for each test.
+ */
+ @Rule
+ public Timeout timeout = new Timeout(300000);
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(TestOzoneFileSystemV1.class);
+
+
+ // Single aggregated test entry point for V1 layout; sub-scenarios are
+ // chained here (per the class note) to avoid cluster-restart cost per test.
+ @Test(timeout = 300_000)
+ @Override
+ public void testFileSystem() throws Exception {
+ setupOzoneFileSystem();
+
+ testOzoneFsServiceLoader();
+ o3fs = (OzoneFileSystem) fs;
+
+ testRenameDir();
+ // FS delete is not supported on V1 yet, so tables are wiped manually.
+ tableCleanup();
+ }
+
+  protected void testRenameDir() throws Exception {
+    final String dir = "/root_dir/dir1";
+    final Path source = new Path(fs.getUri().toString() + dir);
+    final Path dest = new Path(source.toString() + ".renamed");
+    // Add a sub-dir to the directory to be moved.
+    final Path subdir = new Path(source, "sub_dir1");
+    fs.mkdirs(subdir);
+    LOG.info("Created dir {}", subdir);
+    LOG.info("Will move {} to {}", source, dest);
+    fs.rename(source, dest);
+
+    // TODO: Will modify this assertion with fs.exists once HDDS-4332 is
+    // committed to the branch.
+    // Walk the directory table and rebuild each absolute path from the
+    // per-component directory names to confirm the rename took effect.
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+        dirIterator = cluster.getOzoneManager()
+            .getMetadataManager().getDirectoryTable().iterator();
+    String actualDestinKeyName = "/";
+    boolean actualDestinPathExists = false;
+    boolean actualSubDirPathExists = false;
+    Path destinSubDirPath = new Path(dest, "sub_dir1");
+    while (dirIterator.hasNext()) {
+      Table.KeyValue<String, OmDirectoryInfo> next = dirIterator.next();
+      OmDirectoryInfo dirInfo = next.getValue();
+      actualDestinKeyName = actualDestinKeyName + dirInfo.getName();
+
+      Path actualDestinKeyPath = new Path(fs.getUri().toString()
+          + actualDestinKeyName);
+      if (actualDestinKeyPath.equals(dest)) {
+        actualDestinPathExists = true;
+      }
+      if (actualDestinKeyPath.equals(destinSubDirPath)) {
+        actualSubDirPathExists = true;
+      }
+      if (dirIterator.hasNext()) {
+        actualDestinKeyName = actualDestinKeyName + "/";
+      }
+    }
+    assertTrue("Directory rename failed", actualDestinPathExists);
+    // Verify that the subdir is also renamed i.e. keys corresponding to the
+    // sub-directories of the renamed directory have also been renamed.
+    assertTrue("Keys under the renamed directory not renamed",
+        actualSubDirPathExists);
+
+    // Test if one path belongs to other FileSystem.
+    Path fakeDir = new Path(fs.getUri().toString() + "fake" + dir);
+    LambdaTestUtils.intercept(IllegalArgumentException.class, "Wrong FS",
+        () -> fs.rename(fakeDir, dest));
+  }
+
+ /**
+ * Cleanup keyTable and directoryTable explicitly as FS delete operation
+ * is not yet supported.
+ *
+ * @throws IOException DB failure
+ */
+  /**
+   * Cleanup keyTable and directoryTable explicitly as FS delete operation
+   * is not yet supported.
+   *
+   * Both the RocksDB entries and the in-memory table caches are cleared,
+   * otherwise cached values would mask the deletes.
+   *
+   * @throws IOException DB failure
+   */
+  protected void tableCleanup() throws IOException {
+    OMMetadataManager metadataMgr = cluster.getOzoneManager()
+        .getMetadataManager();
+    TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
+        dirTableIterator = metadataMgr.getDirectoryTable().iterator();
+    dirTableIterator.seekToFirst();
+    ArrayList<String> dirList = new ArrayList<>();
+    while (dirTableIterator.hasNext()) {
+      String key = dirTableIterator.key();
+      if (StringUtils.isNotBlank(key)) {
+        dirList.add(key);
+      }
+      dirTableIterator.next();
+    }
+
+    // Drop cached directory entries before deleting from the DB.
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmDirectoryInfo>>>
+        cacheIterator = metadataMgr.getDirectoryTable().cacheIterator();
+    while (cacheIterator.hasNext()) {
+      cacheIterator.next();
+      cacheIterator.remove();
+    }
+
+    for (String dirKey : dirList) {
+      metadataMgr.getDirectoryTable().delete(dirKey);
+      Assert.assertNull("Unexpected entry!",
+          metadataMgr.getDirectoryTable().get(dirKey));
+    }
+
+    Assert.assertTrue("DirTable is not empty",
+        metadataMgr.getDirectoryTable().isEmpty());
+
+    Assert.assertFalse(metadataMgr.getDirectoryTable().cacheIterator()
+        .hasNext());
+
+    TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
+        keyTableIterator = metadataMgr.getKeyTable().iterator();
+    keyTableIterator.seekToFirst();
+    ArrayList<String> fileList = new ArrayList<>();
+    while (keyTableIterator.hasNext()) {
+      String key = keyTableIterator.key();
+      if (StringUtils.isNotBlank(key)) {
+        fileList.add(key);
+      }
+      keyTableIterator.next();
+    }
+
+    // BUG FIX: previously iterated the directory table cache a second time;
+    // it is the KEY table cache that must be cleared here.
+    Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>>
+        keyCacheIterator = metadataMgr.getKeyTable().cacheIterator();
+    while (keyCacheIterator.hasNext()) {
+      keyCacheIterator.next();
+      keyCacheIterator.remove();
+    }
+
+    for (String fileKey : fileList) {
+      metadataMgr.getKeyTable().delete(fileKey);
+      Assert.assertNull("Unexpected entry!",
+          metadataMgr.getKeyTable().get(fileKey));
+    }
+
+    Assert.assertTrue("KeyTable is not empty",
+        metadataMgr.getKeyTable().isEmpty());
+
+    rootItemCount = 0;
+  }
+
+  /**
+   * Builds the cluster configuration for V1-layout tests. Uses the declared
+   * OZONE_OM_LAYOUT_VERSION_V1 constant rather than a "V1" literal so the
+   * value stays in sync with OMConfigKeys.
+   */
+  @NotNull
+  @Override
+  protected OzoneConfiguration getOzoneConfig() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setInt(FS_TRASH_INTERVAL_KEY, 1);
+    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS,
+        enabledFileSystemPaths);
+    conf.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
+        OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    return conf;
+  }
+}
\ No newline at end of file
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 3ec014630f5..84dfd678e9d 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -765,6 +765,18 @@ message KeyInfo {
repeated OzoneAclInfo acls = 13;
optional uint64 objectID = 14;
optional uint64 updateID = 15;
+ optional uint64 parentID = 16;
+}
+
+message DirectoryInfo {
+ required string name = 1;
+ required uint64 creationTime = 2;
+ required uint64 modificationTime = 3;
+ repeated hadoop.hdds.KeyValue metadata = 4;
+ repeated OzoneAclInfo acls = 5;
+ required uint64 objectID = 6;
+ required uint64 updateID = 7;
+ required uint64 parentID = 8;
}
message RepeatedKeyInfo {
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
index c687a4b2290..b4fad19e2a9 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java
@@ -24,6 +24,7 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmPrefixInfo;
@@ -364,6 +365,12 @@ long countEstimatedRowsInTable(Table table)
Set<String> getMultipartUploadKeys(String volumeName,
String bucketName, String prefix) throws IOException;
+ /**
+ * Gets the DirectoryTable.
+ * @return Table.
+ */
+ Table<String, OmDirectoryInfo> getDirectoryTable();
+
/**
* Return table mapped to the specified table name.
* @param tableName
@@ -382,4 +389,25 @@ Set getMultipartUploadKeys(String volumeName,
* @return table names in OM DB.
*/
Set<String> listTableNames();
+
+ /**
+ * Given parent object id and path component name, return the corresponding
+ * DB 'prefixKey' key.
+ *
+ * @param parentObjectId - parent object Id
+ * @param pathComponentName - path component name
+ * @return DB directory key as String.
+ */
+ String getOzonePathKey(long parentObjectId, String pathComponentName);
+
+ /**
+ * Returns DB key name of an open file in OM metadata store. Should be
+ * #open# prefix followed by actual leaf node name.
+ *
+ * @param parentObjectId - parent object Id
+ * @param fileName - file name
+ * @param id - client id for this open request
+ * @return DB directory key as String.
+ */
+ String getOpenFileName(long parentObjectId, String fileName, long id);
}
diff --git a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
index a7e1eabee7b..8a284515931 100644
--- a/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
+++ b/hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java
@@ -30,6 +30,14 @@
/**
* Codec to encode OmKeyInfo as byte array.
+ *
+ *
+ * If the layout version "ozone.om.layout.version" is V1 and
+ * "ozone.om.enable.filesystem.paths" is TRUE. Then, DB stores only the leaf
+ * node name into the 'keyName' field.
+ *
+ * For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+ * 'keyName' field stores only the leaf node name, which is 'file1'.
*/
public class OmKeyInfoCodec implements Codec<OmKeyInfo> {
private static final Logger LOG =
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
index 7e79fe7b3de..91390559665 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
@@ -200,6 +200,10 @@ public void incNumKeys() {
numKeys.incr();
}
+ /**
+ * Increments the total key count by the given delta (used for bulk
+ * operations instead of calling incNumKeys() in a loop).
+ *
+ * @param val number of keys to add to the running total
+ */
+ public void incNumKeys(long val) {
+ numKeys.incr(val);
+ }
+
public void decNumKeys() {
numKeys.incr(-1);
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index da7e98515a9..2788f8d883e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -48,6 +48,7 @@
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.om.codec.OMTransactionInfoCodec;
import org.apache.hadoop.ozone.om.codec.OmBucketInfoCodec;
+import org.apache.hadoop.ozone.om.codec.OmDirectoryInfoCodec;
import org.apache.hadoop.ozone.om.codec.OmKeyInfoCodec;
import org.apache.hadoop.ozone.om.codec.OmMultipartKeyInfoCodec;
import org.apache.hadoop.ozone.om.codec.OmPrefixInfoCodec;
@@ -59,6 +60,7 @@
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
@@ -70,6 +72,7 @@
import org.apache.hadoop.ozone.om.helpers.S3SecretValue;
import org.apache.hadoop.ozone.om.lock.OzoneManagerLock;
import org.apache.hadoop.ozone.om.ratis.OMTransactionInfo;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
import org.apache.hadoop.ozone.protocol.proto
.OzoneManagerProtocolProtos.UserVolumeInfo;
import org.apache.hadoop.ozone.security.OzoneTokenIdentifier;
@@ -125,6 +128,11 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
* |----------------------------------------------------------------------|
* | multipartInfoTable| /volumeName/bucketName/keyName/uploadId ->... |
* |----------------------------------------------------------------------|
+ * | directoryTable | parentId/directoryName -> DirectoryInfo |
+ * |----------------------------------------------------------------------|
+ * | fileTable | parentId/fileName -> KeyInfo |
+ * |----------------------------------------------------------------------|
+ * | openFileTable | parentId/fileName/id -> KeyInfo |
* |----------------------------------------------------------------------|
* | transactionInfoTable | #TRANSACTIONINFO -> OMTransactionInfo |
* |----------------------------------------------------------------------|
@@ -140,6 +148,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
public static final String S3_SECRET_TABLE = "s3SecretTable";
public static final String DELEGATION_TOKEN_TABLE = "dTokenTable";
public static final String PREFIX_TABLE = "prefixTable";
+ public static final String DIRECTORY_TABLE = "directoryTable";
+ public static final String FILE_TABLE = "fileTable";
+ public static final String OPEN_FILE_TABLE = "openFileTable";
public static final String TRANSACTION_INFO_TABLE =
"transactionInfoTable";
@@ -158,6 +169,9 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
private Table s3SecretTable;
private Table dTokenTable;
private Table prefixTable;
+ private Table<String, OmDirectoryInfo> dirTable;
+ private Table<String, OmKeyInfo> fileTable;
+ private Table<String, OmKeyInfo> openFileTable;
private Table transactionInfoTable;
private boolean isRatisEnabled;
private boolean ignorePipelineinKey;
@@ -186,7 +200,8 @@ public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
* For subclass overriding.
*/
protected OmMetadataManagerImpl() {
- this.lock = new OzoneManagerLock(new OzoneConfiguration());
+ OzoneConfiguration conf = new OzoneConfiguration();
+ this.lock = new OzoneManagerLock(conf);
this.openKeyExpireThresholdMS =
OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT;
}
@@ -212,6 +227,9 @@ public Table getBucketTable() {
@Override
public Table<String, OmKeyInfo> getKeyTable() {
+ if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+ return fileTable;
+ }
return keyTable;
}
@@ -222,6 +240,9 @@ public Table getDeletedTable() {
@Override
public Table<String, OmKeyInfo> getOpenKeyTable() {
+ if (OzoneManagerRatisUtils.isOmLayoutVersionV1()) {
+ return openFileTable;
+ }
return openKeyTable;
}
@@ -230,6 +251,11 @@ public Table getPrefixTable() {
return prefixTable;
}
+ @Override
+ public Table<String, OmDirectoryInfo> getDirectoryTable() {
+ return dirTable;
+ }
+
@Override
public Table getMultipartInfoTable() {
return multipartInfoTable;
@@ -323,6 +349,9 @@ protected static DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) {
.addTable(DELEGATION_TOKEN_TABLE)
.addTable(S3_SECRET_TABLE)
.addTable(PREFIX_TABLE)
+ .addTable(DIRECTORY_TABLE)
+ .addTable(FILE_TABLE)
+ .addTable(OPEN_FILE_TABLE)
.addTable(TRANSACTION_INFO_TABLE)
.addCodec(OzoneTokenIdentifier.class, new TokenIdentifierCodec())
.addCodec(OmKeyInfo.class, new OmKeyInfoCodec(true))
@@ -334,6 +363,7 @@ protected static DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) {
.addCodec(OmMultipartKeyInfo.class, new OmMultipartKeyInfoCodec())
.addCodec(S3SecretValue.class, new S3SecretValueCodec())
.addCodec(OmPrefixInfo.class, new OmPrefixInfoCodec())
+ .addCodec(OmDirectoryInfo.class, new OmDirectoryInfoCodec())
.addCodec(OMTransactionInfo.class, new OMTransactionInfoCodec());
}
@@ -388,6 +418,18 @@ protected void initializeOmTables() throws IOException {
OmPrefixInfo.class);
checkTableStatus(prefixTable, PREFIX_TABLE);
+ dirTable = this.store.getTable(DIRECTORY_TABLE, String.class,
+ OmDirectoryInfo.class);
+ checkTableStatus(dirTable, DIRECTORY_TABLE);
+
+ fileTable = this.store.getTable(FILE_TABLE, String.class,
+ OmKeyInfo.class);
+ checkTableStatus(fileTable, FILE_TABLE);
+
+ openFileTable = this.store.getTable(OPEN_FILE_TABLE, String.class,
+ OmKeyInfo.class);
+ checkTableStatus(openFileTable, OPEN_FILE_TABLE);
+
transactionInfoTable = this.store.getTable(TRANSACTION_INFO_TABLE,
String.class, OMTransactionInfo.class);
checkTableStatus(transactionInfoTable, TRANSACTION_INFO_TABLE);
@@ -1135,4 +1177,21 @@ public Set listTableNames() {
return tableMap.keySet();
}
+  /** Builds the DB path key in "<parentObjectId>/<pathComponentName>" form. */
+  @Override
+  public String getOzonePathKey(long parentObjectId, String pathComponentName) {
+    return parentObjectId + OM_KEY_PREFIX + pathComponentName;
+  }
+
+  /** Builds the open-file DB key in "<parentID>/<fileName>/<clientID>" form. */
+  @Override
+  public String getOpenFileName(long parentID, String fileName,
+                                long id) {
+    return parentID + OM_KEY_PREFIX + fileName + OM_KEY_PREFIX + id;
+  }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 212a8e17ef1..a5bda986bd3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -215,6 +215,8 @@
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_KEYTAB_FILE_KEY;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_KERBEROS_PRINCIPAL_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION;
+import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_USER_MAX_VOLUME;
@@ -1138,6 +1140,10 @@ public void start() throws IOException {
omRatisServer.start();
}
+ // TODO: Temporary workaround for OM upgrade path and will be replaced once
+ // upgrade HDDS-3698 story reaches consensus.
+ getOMLayoutVersion();
+
metadataManager.start(configuration);
startSecretManagerIfNecessary();
@@ -3569,6 +3575,14 @@ public boolean getEnableFileSystemPaths() {
OZONE_OM_ENABLE_FILESYSTEM_PATHS_DEFAULT);
}
+ // Reads "ozone.om.layout.version" and publishes the result as a static flag
+ // in OzoneManagerRatisUtils, which the request factory branches on.
+ // NOTE(review): compares against the "V1" literal here; presumably this
+ // should use OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1 -- confirm and
+ // consolidate (constant is not statically imported in this file).
+ private void getOMLayoutVersion() {
+ String version = configuration.getTrimmed(OZONE_OM_LAYOUT_VERSION,
+ OZONE_OM_LAYOUT_VERSION_DEFAULT);
+ boolean omLayoutVersionV1 =
+ StringUtils.equalsIgnoreCase(version, "V1");
+ OzoneManagerRatisUtils.setOmLayoutVersionV1(omLayoutVersionV1);
+ }
+
/**
* Create volume which is required for S3Gateway operations.
* @throws IOException
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
new file mode 100644
index 00000000000..ba592a9156d
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/codec/OmDirectoryInfoCodec.java
@@ -0,0 +1,60 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.codec;
+
+import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hdds.utils.db.Codec;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DirectoryInfo;
+
+import java.io.IOException;
+
+/**
+ * Codec to encode OmDirectoryInfo as byte array.
+ */
+public class OmDirectoryInfoCodec implements Codec<OmDirectoryInfo> {
+
+  /** Serializes via the protobuf DirectoryInfo representation. */
+  @Override
+  public byte[] toPersistedFormat(OmDirectoryInfo object) throws IOException {
+    Preconditions
+        .checkNotNull(object, "Null object can't be converted " +
+            "to byte array.");
+    return object.getProtobuf().toByteArray();
+  }
+
+  /** Deserializes protobuf bytes back into an OmDirectoryInfo. */
+  @Override
+  public OmDirectoryInfo fromPersistedFormat(byte[] rawData)
+      throws IOException {
+    Preconditions
+        .checkNotNull(rawData,
+            "Null byte array can't be converted to real object.");
+    try {
+      return OmDirectoryInfo.getFromProtobuf(DirectoryInfo.parseFrom(rawData));
+    } catch (InvalidProtocolBufferException e) {
+      // Message fixed: this path decodes (not encodes) the raw bytes.
+      throw new IllegalArgumentException(
+          "Can't decode the raw data from the byte array", e);
+    }
+  }
+
+  @Override
+  public OmDirectoryInfo copyObject(OmDirectoryInfo object) {
+    return object.copyObject();
+  }
+}
+
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index 681c0da87e6..d2dd5c7e07b 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -33,14 +33,18 @@
import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketRemoveAclRequest;
import org.apache.hadoop.ozone.om.request.bucket.acl.OMBucketSetAclRequest;
import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequest;
+import org.apache.hadoop.ozone.om.request.file.OMFileCreateRequestV1;
import org.apache.hadoop.ozone.om.request.key.OMKeysDeleteRequest;
import org.apache.hadoop.ozone.om.request.key.OMAllocateBlockRequest;
import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyCommitRequestV1;
import org.apache.hadoop.ozone.om.request.key.OMKeyCreateRequest;
import org.apache.hadoop.ozone.om.request.key.OMKeyDeleteRequest;
import org.apache.hadoop.ozone.om.request.key.OMKeyPurgeRequest;
import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequest;
+import org.apache.hadoop.ozone.om.request.key.OMKeyRenameRequestV1;
import org.apache.hadoop.ozone.om.request.key.OMKeysRenameRequest;
import org.apache.hadoop.ozone.om.request.key.OMTrashRecoverRequest;
import org.apache.hadoop.ozone.om.request.key.acl.OMKeyAddAclRequest;
@@ -83,8 +87,22 @@
*/
public final class OzoneManagerRatisUtils {
+ // TODO: Temporary workaround for OM upgrade path and will be replaced once
+ // upgrade HDDS-3698 story reaches consensus.
+ private static boolean omLayoutVersionV1 = false;
+
private OzoneManagerRatisUtils() {
}
+
+ /**
+ * Sets layout version.
+ *
+ * @param layoutVersionV1 om layout version
+ */
+ public static void setOmLayoutVersionV1(boolean layoutVersionV1) {
+ OzoneManagerRatisUtils.omLayoutVersionV1 = layoutVersionV1;
+ }
+
/**
* Create OMClientRequest which encapsulates the OMRequest.
* @param omRequest
@@ -123,18 +141,30 @@ public static OMClientRequest createClientRequest(OMRequest omRequest) {
case CreateKey:
return new OMKeyCreateRequest(omRequest);
case CommitKey:
+ if (omLayoutVersionV1) {
+ return new OMKeyCommitRequestV1(omRequest);
+ }
return new OMKeyCommitRequest(omRequest);
case DeleteKey:
return new OMKeyDeleteRequest(omRequest);
case DeleteKeys:
return new OMKeysDeleteRequest(omRequest);
case RenameKey:
+ if (omLayoutVersionV1) {
+ return new OMKeyRenameRequestV1(omRequest);
+ }
return new OMKeyRenameRequest(omRequest);
case RenameKeys:
return new OMKeysRenameRequest(omRequest);
case CreateDirectory:
+ if (omLayoutVersionV1) {
+ return new OMDirectoryCreateRequestV1(omRequest);
+ }
return new OMDirectoryCreateRequest(omRequest);
case CreateFile:
+ if (omLayoutVersionV1) {
+ return new OMFileCreateRequestV1(omRequest);
+ }
return new OMFileCreateRequest(omRequest);
case PurgeKeys:
return new OMKeyPurgeRequest(omRequest);
@@ -308,4 +338,13 @@ public static boolean verifyTransactionInfo(
return true;
}
+
+ /**
+ * Returns layout version flag represents V1.
+ * @return true if the OM metadata layout version is V1, otherwise false.
+ */
+ public static boolean isOmLayoutVersionV1() {
+ return omLayoutVersionV1;
+ }
+
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
index fd303e7f09a..1f026ef3fe1 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java
@@ -20,14 +20,18 @@
import java.io.IOException;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import java.util.Map;
import java.util.stream.Collectors;
import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.OzoneAcl;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
@@ -156,6 +160,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
getOmRequest());
OmBucketInfo omBucketInfo = OmBucketInfo.getFromProtobuf(bucketInfo);
+ // Add layout version V1 to bucket info
+ addLayoutVersionToBucket(ozoneManager, omBucketInfo);
+
AuditLogger auditLogger = ozoneManager.getAuditLogger();
OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
@@ -248,6 +255,23 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
}
}
+  /**
+   * Stamps the configured OM layout version into the bucket's metadata map
+   * so readers can tell which key layout the bucket was created under.
+   */
+  private void addLayoutVersionToBucket(OzoneManager ozoneManager,
+      OmBucketInfo omBucketInfo) {
+    Map<String, String> metadata = omBucketInfo.getMetadata();
+    if (metadata == null) {
+      metadata = new HashMap<>();
+    }
+    OzoneConfiguration configuration = ozoneManager.getConfiguration();
+    // TODO: Many unit test cases has null config and done a simple null
+    // check now. It can be done later, to avoid massive test code changes.
+    if (configuration != null) {
+      String layOutVersion = configuration
+          .get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
+              OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT);
+      metadata.put(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, layOutVersion);
+      omBucketInfo.setMetadata(metadata);
+    }
+  }
/**
* Add default acls for bucket. These acls are inherited from volume
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
new file mode 100644
index 00000000000..43670188c12
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestV1.java
@@ -0,0 +1,315 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.file.OMDirectoryCreateResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .CreateDirectoryRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .CreateDirectoryResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
+ .Status;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_KEY_NAME;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*;
+
+/**
+ * Handle create directory request. It will add path components to the directory
+ * table and maintains file system semantics.
+ */
+public class OMDirectoryCreateRequestV1 extends OMDirectoryCreateRequest {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(OMDirectoryCreateRequestV1.class);
+
+ public OMDirectoryCreateRequestV1(OMRequest omRequest) {
+ super(omRequest);
+ }
+
+ @Override
+ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+ long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+ CreateDirectoryRequest createDirectoryRequest = getOmRequest()
+ .getCreateDirectoryRequest();
+ KeyArgs keyArgs = createDirectoryRequest.getKeyArgs();
+
+ String volumeName = keyArgs.getVolumeName();
+ String bucketName = keyArgs.getBucketName();
+ String keyName = keyArgs.getKeyName();
+ int numKeysCreated = 0;
+
+ OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+ getOmRequest());
+ omResponse.setCreateDirectoryResponse(CreateDirectoryResponse.newBuilder());
+ OMMetrics omMetrics = ozoneManager.getMetrics();
+ omMetrics.incNumCreateDirectory();
+
+ AuditLogger auditLogger = ozoneManager.getAuditLogger();
+ OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo();
+
+ Map auditMap = buildKeyArgsAuditMap(keyArgs);
+ OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+ boolean acquiredLock = false;
+ IOException exception = null;
+ OMClientResponse omClientResponse = null;
+ Result result = Result.FAILURE;
+ List missingParentInfos;
+
+ try {
+ keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+ volumeName = keyArgs.getVolumeName();
+ bucketName = keyArgs.getBucketName();
+
+ // check Acl
+ checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
+ IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+ // Check if this is the root of the filesystem.
+ if (keyName.length() == 0) {
+ throw new OMException("Directory create failed. Cannot create " +
+ "directory at root of the filesystem",
+ OMException.ResultCodes.CANNOT_CREATE_DIRECTORY_AT_ROOT);
+ }
+ // acquire lock
+ acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+ volumeName, bucketName);
+
+ validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+ Path keyPath = Paths.get(keyName);
+
+ // Need to check if any files exist in the given path, if they exist we
+ // cannot create a directory with the given key.
+ // Verify the path against directory table
+ OMFileRequest.OMPathInfoV1 omPathInfo =
+ OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager, volumeName,
+ bucketName, keyName, keyPath);
+ OMFileRequest.OMDirectoryResult omDirectoryResult =
+ omPathInfo.getDirectoryResult();
+
+ if (omDirectoryResult == FILE_EXISTS ||
+ omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
+ throw new OMException("Unable to create directory: " + keyName
+ + " in volume/bucket: " + volumeName + "/" + bucketName + " as " +
+ "file:" + omPathInfo.getFileExistsInPath() + " already exists",
+ FILE_ALREADY_EXISTS);
+ } else if (omDirectoryResult == DIRECTORY_EXISTS_IN_GIVENPATH ||
+ omDirectoryResult == NONE) {
+
+ // prepare all missing parents
+ missingParentInfos =
+ OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
+ ozoneManager, keyArgs, omPathInfo, trxnLogIndex);
+
+ // prepare leafNode dir
+ OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(
+ omPathInfo.getLeafNodeName(),
+ keyArgs, omPathInfo.getLeafNodeObjectId(),
+ omPathInfo.getLastKnownParentId(), trxnLogIndex,
+ OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
+ OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+ Optional.of(dirInfo), Optional.of(missingParentInfos),
+ trxnLogIndex);
+
+ // total number of keys created.
+ numKeysCreated = missingParentInfos.size() + 1;
+
+ result = OMDirectoryCreateRequest.Result.SUCCESS;
+ omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(),
+ dirInfo, missingParentInfos, result);
+ } else {
+ result = Result.DIRECTORY_ALREADY_EXISTS;
+ omResponse.setStatus(Status.DIRECTORY_ALREADY_EXISTS);
+ omClientResponse = new OMDirectoryCreateResponseV1(omResponse.build(),
+ result);
+ }
+ } catch (IOException ex) {
+ exception = ex;
+ omClientResponse = new OMDirectoryCreateResponseV1(
+ createErrorOMResponse(omResponse, exception), result);
+ } finally {
+ addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+ omDoubleBufferHelper);
+ if (acquiredLock) {
+ omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+ bucketName);
+ }
+ }
+
+ auditLog(auditLogger, buildAuditMessage(OMAction.CREATE_DIRECTORY,
+ auditMap, exception, userInfo));
+
+ logResult(createDirectoryRequest, keyArgs, omMetrics, numKeysCreated,
+ result, exception);
+
+ return omClientResponse;
+ }
+
+ private void logResult(CreateDirectoryRequest createDirectoryRequest,
+ KeyArgs keyArgs, OMMetrics omMetrics, int numKeys,
+ Result result,
+ IOException exception) {
+
+ String volumeName = keyArgs.getVolumeName();
+ String bucketName = keyArgs.getBucketName();
+ String keyName = keyArgs.getKeyName();
+
+ switch (result) {
+ case SUCCESS:
+ omMetrics.incNumKeys(numKeys);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Directory created. Volume:{}, Bucket:{}, Key:{}",
+ volumeName, bucketName, keyName);
+ }
+ break;
+ case DIRECTORY_ALREADY_EXISTS:
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Directory already exists. Volume:{}, Bucket:{}, Key{}",
+ volumeName, bucketName, keyName, exception);
+ }
+ break;
+ case FAILURE:
+ omMetrics.incNumCreateDirectoryFails();
+ LOG.error("Directory creation failed. Volume:{}, Bucket:{}, Key{}. " +
+ "Exception:{}", volumeName, bucketName, keyName, exception);
+ break;
+ default:
+ LOG.error("Unrecognized Result for OMDirectoryCreateRequest: {}",
+ createDirectoryRequest);
+ }
+ }
+
+ /**
+ * Construct OmDirectoryInfo for every parent directory in missing list.
+ *
+ * @param ozoneManager Ozone Manager
+ * @param keyArgs key arguments
+ * @param pathInfo list of parent directories to be created and its ACLs
+ * @param trxnLogIndex transaction log index id
+ * @return list of missing parent directories
+ * @throws IOException DB failure
+ */
+ public static List getAllMissingParentDirInfo(
+ OzoneManager ozoneManager, KeyArgs keyArgs,
+ OMFileRequest.OMPathInfoV1 pathInfo, long trxnLogIndex)
+ throws IOException {
+ OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+ List missingParentInfos = new ArrayList<>();
+
+ ImmutablePair objIdRange = OMFileRequest
+ .getObjIdRangeFromTxId(trxnLogIndex);
+ long baseObjId = objIdRange.getLeft();
+ long maxObjId = objIdRange.getRight();
+ long maxLevels = maxObjId - baseObjId;
+ long objectCount = 1;
+
+ String volumeName = keyArgs.getVolumeName();
+ String bucketName = keyArgs.getBucketName();
+ String keyName = keyArgs.getKeyName();
+
+ long lastKnownParentId = pathInfo.getLastKnownParentId();
+ List missingParents = pathInfo.getMissingParents();
+ List inheritAcls = pathInfo.getAcls();
+ for (String missingKey : missingParents) {
+ long nextObjId = baseObjId + objectCount;
+ if (nextObjId > maxObjId) {
+ throw new OMException("Too many directories in path. Exceeds limit of "
+ + maxLevels + ". Unable to create directory: " + keyName
+ + " in volume/bucket: " + volumeName + "/" + bucketName,
+ INVALID_KEY_NAME);
+ }
+
+ LOG.debug("missing parent {} getting added to DirectoryTable",
+ missingKey);
+ OmDirectoryInfo dirInfo = createDirectoryInfoWithACL(missingKey,
+ keyArgs, nextObjId, lastKnownParentId, trxnLogIndex, inheritAcls);
+ objectCount++;
+
+ missingParentInfos.add(dirInfo);
+
+ // updating id for the next sub-dir
+ lastKnownParentId = nextObjId;
+ }
+ pathInfo.setLastKnownParentId(lastKnownParentId);
+ pathInfo.setLeafNodeObjectId(baseObjId + objectCount);
+ return missingParentInfos;
+ }
+
+ /**
+ * Fill in a DirectoryInfo for a new directory entry in OM database.
+ * without initializing ACLs from the KeyArgs - used for intermediate
+ * directories which get created internally/recursively during file
+ * and directory create.
+ * @param dirName
+ * @param keyArgs
+ * @param objectId
+ * @param parentObjectId
+ * @param inheritAcls
+ * @return the OmDirectoryInfo structure
+ */
+ public static OmDirectoryInfo createDirectoryInfoWithACL(
+ String dirName, KeyArgs keyArgs, long objectId,
+ long parentObjectId, long transactionIndex,
+ List inheritAcls) {
+
+ return OmDirectoryInfo.newBuilder()
+ .setName(dirName)
+ .setCreationTime(keyArgs.getModificationTime())
+ .setModificationTime(keyArgs.getModificationTime())
+ .setObjectID(objectId)
+ .setUpdateID(transactionIndex)
+ .setParentObjectID(parentObjectId)
+ .setAcls(inheritAcls).build();
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
index 9a7f31aece9..7d104e479c6 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java
@@ -234,23 +234,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
List inheritAcls = pathInfo.getAcls();
// Check if a file or directory exists with same key name.
- if (omDirectoryResult == FILE_EXISTS) {
- if (!isOverWrite) {
- throw new OMException("File " + keyName + " already exists",
- OMException.ResultCodes.FILE_ALREADY_EXISTS);
- }
- } else if (omDirectoryResult == DIRECTORY_EXISTS) {
- throw new OMException("Can not write to directory: " + keyName,
- OMException.ResultCodes.NOT_A_FILE);
- } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
- throw new OMException(
- "Can not create file: " + keyName + " as there " +
- "is already file in the given path",
- OMException.ResultCodes.NOT_A_FILE);
- }
+ checkDirectoryResult(keyName, isOverWrite, omDirectoryResult);
if (!isRecursive) {
- checkAllParentsExist(ozoneManager, keyArgs, pathInfo);
+ checkAllParentsExist(keyArgs, pathInfo);
}
// do open key
@@ -351,8 +338,40 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
return omClientResponse;
}
- private void checkAllParentsExist(OzoneManager ozoneManager,
- KeyArgs keyArgs,
+ /**
+ * Verify om directory result.
+ *
+ * @param keyName key name
+ * @param isOverWrite flag represents whether file can be overwritten
+ * @param omDirectoryResult directory result
+ * @throws OMException if file or directory or file exists in the given path
+ */
+ protected void checkDirectoryResult(String keyName, boolean isOverWrite,
+ OMFileRequest.OMDirectoryResult omDirectoryResult) throws OMException {
+ if (omDirectoryResult == FILE_EXISTS) {
+ if (!isOverWrite) {
+ throw new OMException("File " + keyName + " already exists",
+ OMException.ResultCodes.FILE_ALREADY_EXISTS);
+ }
+ } else if (omDirectoryResult == DIRECTORY_EXISTS) {
+ throw new OMException("Can not write to directory: " + keyName,
+ OMException.ResultCodes.NOT_A_FILE);
+ } else if (omDirectoryResult == FILE_EXISTS_IN_GIVENPATH) {
+ throw new OMException(
+ "Can not create file: " + keyName + " as there " +
+ "is already file in the given path",
+ OMException.ResultCodes.NOT_A_FILE);
+ }
+ }
+
+ /**
+ * Verify the existence of parent directory.
+ *
+ * @param keyArgs key arguments
+ * @param pathInfo om path info
+ * @throws IOException directory not found
+ */
+ protected void checkAllParentsExist(KeyArgs keyArgs,
OMFileRequest.OMPathInfo pathInfo) throws IOException {
String keyName = keyArgs.getKeyName();
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
new file mode 100644
index 00000000000..e46416beba7
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java
@@ -0,0 +1,259 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse;
+import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles create file request layout version1.
+ */
+public class OMFileCreateRequestV1 extends OMFileCreateRequest {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(OMFileCreateRequestV1.class);
+ public OMFileCreateRequestV1(OMRequest omRequest) {
+ super(omRequest);
+ }
+
+ @Override
+ @SuppressWarnings("methodlength")
+ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+ long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+ CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest();
+ KeyArgs keyArgs = createFileRequest.getKeyArgs();
+ Map auditMap = buildKeyArgsAuditMap(keyArgs);
+
+ String volumeName = keyArgs.getVolumeName();
+ String bucketName = keyArgs.getBucketName();
+ String keyName = keyArgs.getKeyName();
+
+ // if isRecursive is true, file would be created even if parent
+ // directories does not exist.
+ boolean isRecursive = createFileRequest.getIsRecursive();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("File create for : " + volumeName + "/" + bucketName + "/"
+ + keyName + ":" + isRecursive);
+ }
+
+ // if isOverWrite is true, file would be over written.
+ boolean isOverWrite = createFileRequest.getIsOverwrite();
+
+ OMMetrics omMetrics = ozoneManager.getMetrics();
+ omMetrics.incNumCreateFile();
+
+ OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+ boolean acquiredLock = false;
+
+ OmVolumeArgs omVolumeArgs = null;
+ OmBucketInfo omBucketInfo = null;
+ final List locations = new ArrayList<>();
+ List missingParentInfos;
+ int numKeysCreated = 0;
+
+ OMClientResponse omClientResponse = null;
+ OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+ getOmRequest());
+ IOException exception = null;
+ Result result = null;
+ try {
+ keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+ volumeName = keyArgs.getVolumeName();
+ bucketName = keyArgs.getBucketName();
+
+ if (keyName.length() == 0) {
+ // Check if this is the root of the filesystem.
+ throw new OMException("Can not write to directory: " + keyName,
+ OMException.ResultCodes.NOT_A_FILE);
+ }
+
+ // check Acl
+ checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
+ IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+ // acquire lock
+ acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+ volumeName, bucketName);
+
+ validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+ OmKeyInfo dbFileInfo = null;
+
+ OMFileRequest.OMPathInfoV1 pathInfoV1 =
+ OMFileRequest.verifyDirectoryKeysInPath(omMetadataManager,
+ volumeName, bucketName, keyName, Paths.get(keyName));
+
+ if (pathInfoV1.getDirectoryResult()
+ == OMFileRequest.OMDirectoryResult.FILE_EXISTS) {
+ String dbFileKey = omMetadataManager.getOzonePathKey(
+ pathInfoV1.getLastKnownParentId(),
+ pathInfoV1.getLeafNodeName());
+ dbFileInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
+ omMetadataManager, dbFileKey, keyName);
+ }
+
+ // check if the file or directory already existed in OM
+ checkDirectoryResult(keyName, isOverWrite,
+ pathInfoV1.getDirectoryResult());
+
+ if (!isRecursive) {
+ checkAllParentsExist(keyArgs, pathInfoV1);
+ }
+
+ // add all missing parents to dir table
+ missingParentInfos =
+ OMDirectoryCreateRequestV1.getAllMissingParentDirInfo(
+ ozoneManager, keyArgs, pathInfoV1, trxnLogIndex);
+
+ // total number of keys created.
+ numKeysCreated = missingParentInfos.size();
+
+ // do open key
+ OmBucketInfo bucketInfo = omMetadataManager.getBucketTable().get(
+ omMetadataManager.getBucketKey(volumeName, bucketName));
+
+ OmKeyInfo omFileInfo = prepareFileInfo(omMetadataManager, keyArgs,
+ dbFileInfo, keyArgs.getDataSize(), locations,
+ getFileEncryptionInfo(keyArgs), ozoneManager.getPrefixManager(),
+ bucketInfo, pathInfoV1, trxnLogIndex,
+ ozoneManager.isRatisEnabled());
+
+ long openVersion = omFileInfo.getLatestVersionLocations().getVersion();
+ long clientID = createFileRequest.getClientID();
+ String dbOpenFileName = omMetadataManager.getOpenFileName(
+ pathInfoV1.getLastKnownParentId(), pathInfoV1.getLeafNodeName(),
+ clientID);
+
+ // Append new blocks
+ List newLocationList = keyArgs.getKeyLocationsList()
+ .stream().map(OmKeyLocationInfo::getFromProtobuf)
+ .collect(Collectors.toList());
+ omFileInfo.appendNewBlocks(newLocationList, false);
+
+ omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
+ omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
+ // check volume quota
+ long preAllocatedSpace = newLocationList.size()
+ * ozoneManager.getScmBlockSize()
+ * omFileInfo.getFactor().getNumber();
+ checkVolumeQuotaInBytes(omVolumeArgs, preAllocatedSpace);
+
+ // Add to cache entry can be done outside of lock for this openKey.
+ // Even if bucket gets deleted, when commitKey we shall identify if
+ // bucket gets deleted.
+ OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
+ dbOpenFileName, omFileInfo, pathInfoV1.getLeafNodeName(),
+ trxnLogIndex);
+
+ // Add cache entries for the prefix directories.
+ // Skip adding for the file key itself, until Key Commit.
+ OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager,
+ Optional.absent(), Optional.of(missingParentInfos),
+ trxnLogIndex);
+
+ // update usedBytes atomically.
+ omVolumeArgs.getUsedBytes().add(preAllocatedSpace);
+ omBucketInfo.getUsedBytes().add(preAllocatedSpace);
+
+ // Prepare response. Sets user given full key name in the 'keyName'
+ // attribute in response object.
+ omResponse.setCreateFileResponse(CreateFileResponse.newBuilder()
+ .setKeyInfo(omFileInfo.getProtobuf(keyName))
+ .setID(clientID)
+ .setOpenVersion(openVersion).build())
+ .setCmdType(Type.CreateFile);
+ omClientResponse = new OMFileCreateResponseV1(omResponse.build(),
+ omFileInfo, missingParentInfos, clientID, omVolumeArgs,
+ omBucketInfo);
+
+ result = Result.SUCCESS;
+ } catch (IOException ex) {
+ result = Result.FAILURE;
+ exception = ex;
+ omMetrics.incNumCreateFileFails();
+ omResponse.setCmdType(Type.CreateFile);
+ omClientResponse = new OMFileCreateResponse(createErrorOMResponse(
+ omResponse, exception));
+ } finally {
+ addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+ omDoubleBufferHelper);
+ if (acquiredLock) {
+ omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+ bucketName);
+ }
+ }
+
+ // Audit Log outside the lock
+ auditLog(ozoneManager.getAuditLogger(), buildAuditMessage(
+ OMAction.CREATE_FILE, auditMap, exception,
+ getOmRequest().getUserInfo()));
+
+ switch (result) {
+ case SUCCESS:
+ omMetrics.incNumKeys(numKeysCreated);
+ LOG.debug("File created. Volume:{}, Bucket:{}, Key:{}", volumeName,
+ bucketName, keyName);
+ break;
+ case FAILURE:
+ LOG.error("File create failed. Volume:{}, Bucket:{}, Key{}.",
+ volumeName, bucketName, keyName, exception);
+ break;
+ default:
+ LOG.error("Unrecognized Result for OMFileCreateRequest: {}",
+ createFileRequest);
+ }
+
+ return omClientResponse;
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index 21ffff815e0..86811d9d25d 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -20,17 +20,30 @@
import java.io.IOException;
import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.ArrayList;
+import java.util.Collections;
+import java.util.Iterator;
import java.util.List;
import com.google.common.base.Optional;
import com.google.common.base.Preconditions;
import org.apache.commons.lang3.tuple.ImmutablePair;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
import org.apache.hadoop.ozone.OzoneAcl;
+import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.jetbrains.annotations.NotNull;
+import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -129,6 +142,117 @@ public static OMPathInfo verifyFilesInPath(
return new OMPathInfo(missing, OMDirectoryResult.NONE, inheritAcls);
}
+ /**
+ * Verify any dir/key exist in the given path in the specified
+ * volume/bucket by iterating through directory table.
+ *
+ * @param omMetadataManager OM Metadata manager
+ * @param volumeName volume name
+ * @param bucketName bucket name
+ * @param keyName key name
+ * @param keyPath path
+ * @return OMPathInfoV1 path info object
+ * @throws IOException on DB failure
+ */
+ public static OMPathInfoV1 verifyDirectoryKeysInPath(
+ @Nonnull OMMetadataManager omMetadataManager,
+ @Nonnull String volumeName,
+ @Nonnull String bucketName, @Nonnull String keyName,
+ @Nonnull Path keyPath) throws IOException {
+
+ String leafNodeName = OzoneFSUtils.getFileName(keyName);
+ List missing = new ArrayList<>();
+
+ // Found no files/ directories in the given path.
+ OMDirectoryResult result = OMDirectoryResult.NONE;
+
+ Iterator elements = keyPath.iterator();
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+ // by default, inherit bucket ACLs
+ List inheritAcls = omBucketInfo.getAcls();
+
+ long lastKnownParentId = omBucketInfo.getObjectID();
+ String dbDirName = ""; // absolute path for trace logs
+ // for better logging
+ StringBuilder fullKeyPath = new StringBuilder(bucketKey);
+ while (elements.hasNext()) {
+ String fileName = elements.next().toString();
+ fullKeyPath.append(OzoneConsts.OM_KEY_PREFIX);
+ fullKeyPath.append(fileName);
+ if (missing.size() > 0) {
+ // Add all the sub-dirs to the missing list except the leaf element.
+ // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt.
+ // Assume /vol1/buck1/a/b/c exists, then add d, e, f into missing list.
+ if(elements.hasNext()){
+ // skips leaf node.
+ missing.add(fileName);
+ }
+ continue;
+ }
+
+ // For example, /vol1/buck1/a/b/c/d/e/f/file1.txt
+ // 1. Do lookup on directoryTable. If not exists goto next step.
+ // 2. Do look on keyTable. If not exists goto next step.
+ // 3. Add 'sub-dir' to missing parents list
+ String dbNodeName = omMetadataManager.getOzonePathKey(
+ lastKnownParentId, fileName);
+ OmDirectoryInfo omDirInfo = omMetadataManager.getDirectoryTable().
+ get(dbNodeName);
+ if (omDirInfo != null) {
+ dbDirName += omDirInfo.getName() + OzoneConsts.OZONE_URI_DELIMITER;
+ if (elements.hasNext()) {
+ result = OMDirectoryResult.DIRECTORY_EXISTS_IN_GIVENPATH;
+ lastKnownParentId = omDirInfo.getObjectID();
+ inheritAcls = omDirInfo.getAcls();
+ continue;
+ } else {
+ // Checked all the sub-dirs till the leaf node.
+ // Found a directory in the given path.
+ result = OMDirectoryResult.DIRECTORY_EXISTS;
+ }
+ } else {
+ // Get parentID from the lastKnownParent. For any files, directly under
+ // the bucket, the parent is the bucketID. Say, "/vol1/buck1/file1"
+ // TODO: Need to add UT for this case along with OMFileCreateRequest.
+ if (omMetadataManager.getKeyTable().isExist(dbNodeName)) {
+ if (elements.hasNext()) {
+ // Found a file in the given key name.
+ result = OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH;
+ } else {
+ // Checked all the sub-dirs till the leaf file.
+ // Found a file with the given key name.
+ result = OMDirectoryResult.FILE_EXISTS;
+ }
+ break; // Skip directory traversal as it hits key.
+ }
+
+ // Add to missing list, there is no such file/directory with given name.
+ if (elements.hasNext()) {
+ missing.add(fileName);
+ }
+ }
+ }
+
+ LOG.trace("verifyFiles/Directories in Path : " + "/" + volumeName
+ + "/" + bucketName + "/" + keyName + ":" + result);
+
+ if (result == OMDirectoryResult.FILE_EXISTS_IN_GIVENPATH || result ==
+ OMDirectoryResult.FILE_EXISTS) {
+ return new OMPathInfoV1(leafNodeName, lastKnownParentId, missing,
+ result, inheritAcls, fullKeyPath.toString());
+ }
+
+ String dbDirKeyName = omMetadataManager.getOzoneDirKey(volumeName,
+ bucketName, dbDirName);
+ LOG.trace("Acls inherited from parent " + dbDirKeyName + " are : "
+ + inheritAcls);
+
+ return new OMPathInfoV1(leafNodeName, lastKnownParentId, missing,
+ result, inheritAcls);
+ }
+
/**
* Get the valid base object id given the transaction id.
* @param id of the transaction
@@ -156,6 +280,59 @@ public static ImmutablePair getObjIdRangeFromTxId(long id) {
return new ImmutablePair<>(baseId, maxAvailableId);
}
+
+ /**
+ * Class to return the results from verifyDirectoryKeysInPath.
+ * Includes the list of missing intermediate directories and
+ * the directory search result code.
+ */
+ public static class OMPathInfoV1 extends OMPathInfo{
+ private String leafNodeName;
+ private long lastKnownParentId;
+ private long leafNodeObjectId;
+ private String fileExistsInPath;
+
+ public OMPathInfoV1(String leafNodeName, long lastKnownParentId,
+ List missingParents, OMDirectoryResult result,
+ List aclList, String fileExistsInPath) {
+ super(missingParents, result, aclList);
+ this.leafNodeName = leafNodeName;
+ this.lastKnownParentId = lastKnownParentId;
+ this.fileExistsInPath = fileExistsInPath;
+ }
+
+ public OMPathInfoV1(String leafNodeName, long lastKnownParentId,
+ List missingParents, OMDirectoryResult result,
+ List aclList) {
+ this(leafNodeName, lastKnownParentId, missingParents, result, aclList,
+ "");
+ }
+
+ public String getLeafNodeName() {
+ return leafNodeName;
+ }
+
+ public long getLeafNodeObjectId() {
+ return leafNodeObjectId;
+ }
+
+ public void setLeafNodeObjectId(long leafNodeObjectId) {
+ this.leafNodeObjectId = leafNodeObjectId;
+ }
+
+ public void setLastKnownParentId(long lastKnownParentId) {
+ this.lastKnownParentId = lastKnownParentId;
+ }
+
+ public long getLastKnownParentId() {
+ return lastKnownParentId;
+ }
+
+ public String getFileExistsInPath() {
+ return fileExistsInPath;
+ }
+ }
+
/**
* Class to return the results from verifyFilesInPath.
* Includes the list of missing intermediate directories and
@@ -254,4 +431,338 @@ public static void addKeyTableCacheEntries(
new CacheValue<>(keyInfo, index));
}
}
+
+ /**
+ * Adding directory info to the Table cache.
+ *
+ * @param omMetadataManager OM Metadata Manager
+ * @param dirInfo directory info
+ * @param missingParentInfos list of the parents to be added to DB
+ * @param trxnLogIndex transaction log index
+ */
+ public static void addDirectoryTableCacheEntries(
+ OMMetadataManager omMetadataManager,
+ Optional<OmDirectoryInfo> dirInfo,
+ Optional<List<OmDirectoryInfo>> missingParentInfos,
+ long trxnLogIndex) {
+ for (OmDirectoryInfo subDirInfo : missingParentInfos.get()) {
+ omMetadataManager.getDirectoryTable().addCacheEntry(
+ new CacheKey<>(omMetadataManager.getOzonePathKey(
+ subDirInfo.getParentObjectID(), subDirInfo.getName())),
+ new CacheValue<>(Optional.of(subDirInfo), trxnLogIndex));
+ }
+
+ if (dirInfo.isPresent()) {
+ omMetadataManager.getDirectoryTable().addCacheEntry(
+ new CacheKey<>(omMetadataManager.getOzonePathKey(
+ dirInfo.get().getParentObjectID(),
+ dirInfo.get().getName())),
+ new CacheValue<>(dirInfo, trxnLogIndex));
+ }
+ }
+
+ /**
+ * Adding Key info to the openFile Table cache.
+ *
+ * @param omMetadataManager OM Metadata Manager
+ * @param dbOpenFileName open file name key
+ * @param omFileInfo key info
+ * @param fileName file name
+ * @param trxnLogIndex transaction log index
+ * Note: stores only the leaf node name into the keyName field of omFileInfo
+ */
+ public static void addOpenFileTableCacheEntry(
+ OMMetadataManager omMetadataManager, String dbOpenFileName,
+ @Nullable OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
+
+ Optional<OmKeyInfo> keyInfoOptional = Optional.absent();
+ if (omFileInfo != null) {
+ // New key format for the openFileTable.
+ // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+ // keyName field stores only the leaf node name, which is 'file1'.
+ omFileInfo.setKeyName(fileName);
+ keyInfoOptional = Optional.of(omFileInfo);
+ }
+
+ omMetadataManager.getOpenKeyTable().addCacheEntry(
+ new CacheKey<>(dbOpenFileName),
+ new CacheValue<>(keyInfoOptional, trxnLogIndex));
+ }
+
+ /**
+ * Adding Key info to the file table cache.
+ *
+ * @param omMetadataManager OM Metadata Manager
+ * @param dbFileKey file name key
+ * @param omFileInfo key info
+ * @param fileName file name
+ * @param trxnLogIndex transaction log index
+ * Note: stores only the leaf node name into the keyName field of omFileInfo
+ */
+ public static void addFileTableCacheEntry(
+ OMMetadataManager omMetadataManager, String dbFileKey,
+ OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
+
+ // New key format for the fileTable.
+ // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+ // keyName field stores only the leaf node name, which is 'file1'.
+ omFileInfo.setKeyName(fileName);
+
+ omMetadataManager.getKeyTable().addCacheEntry(
+ new CacheKey<>(dbFileKey),
+ new CacheValue<>(Optional.of(omFileInfo), trxnLogIndex));
+ }
+
+ /**
+ * Adding omKeyInfo to open file table.
+ *
+ * @param omMetadataMgr OM Metadata Manager
+ * @param batchOp batch of db operations
+ * @param omFileInfo omKeyInfo
+ * @param openKeySessionID clientID
+ * @throws IOException DB failure
+ */
+ public static void addToOpenFileTable(OMMetadataManager omMetadataMgr,
+ BatchOperation batchOp,
+ OmKeyInfo omFileInfo,
+ long openKeySessionID)
+ throws IOException {
+
+ String dbOpenFileKey = omMetadataMgr.getOpenFileName(
+ omFileInfo.getParentObjectID(), omFileInfo.getFileName(),
+ openKeySessionID);
+
+ omMetadataMgr.getOpenKeyTable().putWithBatch(batchOp, dbOpenFileKey,
+ omFileInfo);
+ }
+
+ /**
+ * Adding omKeyInfo to file table.
+ *
+ * @param omMetadataMgr
+ * @param batchOp
+ * @param omFileInfo
+ * @throws IOException
+ */
+ public static void addToFileTable(OMMetadataManager omMetadataMgr,
+ BatchOperation batchOp,
+ OmKeyInfo omFileInfo)
+ throws IOException {
+
+ String dbFileKey = omMetadataMgr.getOzonePathKey(
+ omFileInfo.getParentObjectID(), omFileInfo.getFileName());
+
+ omMetadataMgr.getKeyTable().putWithBatch(batchOp,
+ dbFileKey, omFileInfo);
+ }
+
+ /**
+ * Gets om key info from open key table if openFileTable flag is true,
+ * otherwise get it from key table.
+ *
+ * @param openFileTable if true, read the key info from the openFileTable,
+ * otherwise from the fileTable
+ * @param omMetadataMgr OM Metadata Manager
+ * @param dbOpenFileKey open file key name in DB
+ * @param keyName key name
+ * @return om key info
+ * @throws IOException DB failure
+ */
+ public static OmKeyInfo getOmKeyInfoFromFileTable(boolean openFileTable,
+ OMMetadataManager omMetadataMgr, String dbOpenFileKey, String keyName)
+ throws IOException {
+
+ OmKeyInfo dbOmKeyInfo;
+ if (openFileTable) {
+ dbOmKeyInfo = omMetadataMgr.getOpenKeyTable().get(dbOpenFileKey);
+ } else {
+ dbOmKeyInfo = omMetadataMgr.getKeyTable().get(dbOpenFileKey);
+ }
+
+ // DB OMKeyInfo will store only fileName into keyName field. This
+ // function is to set user given keyName into the OmKeyInfo object.
+ // For example, the user given key path is '/a/b/c/d/e/file1', then in DB
+ // keyName field stores only the leaf node name, which is 'file1'.
+ if (dbOmKeyInfo != null) {
+ dbOmKeyInfo.setKeyName(keyName);
+ }
+ return dbOmKeyInfo;
+ }
+
+ /**
+ * Gets OmKeyInfo if exists for the given key name in the DB.
+ *
+ * @param omMetadataMgr metadata manager
+ * @param volumeName volume name
+ * @param bucketName bucket name
+ * @param keyName key name
+ * @param scmBlockSize scm block size
+ * @return OzoneFileStatus
+ * @throws IOException DB failure
+ */
+ @Nullable
+ public static OzoneFileStatus getOMKeyInfoIfExists(
+ OMMetadataManager omMetadataMgr, String volumeName, String bucketName,
+ String keyName, long scmBlockSize) throws IOException {
+
+ Path keyPath = Paths.get(keyName);
+ Iterator<Path> elements = keyPath.iterator();
+ String bucketKey = omMetadataMgr.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataMgr.getBucketTable().get(bucketKey);
+
+ long lastKnownParentId = omBucketInfo.getObjectID();
+ OmDirectoryInfo omDirInfo = null;
+ while (elements.hasNext()) {
+ String fileName = elements.next().toString();
+
+ // For example, /vol1/buck1/a/b/c/d/e/file1.txt
+ // 1. Do lookup path component on directoryTable starting from bucket
+ // 'buck1' to the leaf node component, which is 'file1.txt'.
+ // 2. If there is no dir exists for the leaf node component 'file1.txt'
+ // then do look it on fileTable.
+ String dbNodeName = omMetadataMgr.getOzonePathKey(
+ lastKnownParentId, fileName);
+ omDirInfo = omMetadataMgr.getDirectoryTable().get(dbNodeName);
+
+ if (omDirInfo != null) {
+ lastKnownParentId = omDirInfo.getObjectID();
+ } else if (!elements.hasNext()) {
+ // reached last path component. Check file exists for the given path.
+ OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(false,
+ omMetadataMgr, dbNodeName, keyName);
+ if (omKeyInfo != null) {
+ return new OzoneFileStatus(omKeyInfo, scmBlockSize, false);
+ }
+ } else {
+ // Missing intermediate directory and just return null;
+ // key not found in DB
+ return null;
+ }
+ }
+
+ if (omDirInfo != null) {
+ OmKeyInfo omKeyInfo = getOmKeyInfo(volumeName, bucketName, omDirInfo,
+ keyName);
+ return new OzoneFileStatus(omKeyInfo, scmBlockSize, true);
+ }
+
+ // key not found in DB
+ return null;
+ }
+
+ /**
+ * Prepare OmKeyInfo from OmDirectoryInfo.
+ *
+ * @param volumeName volume name
+ * @param bucketName bucket name
+ * @param dirInfo directory info
+ * @param keyName user given key name
+ * @return OmKeyInfo object
+ */
+ @NotNull
+ public static OmKeyInfo getOmKeyInfo(String volumeName, String bucketName,
+ OmDirectoryInfo dirInfo, String keyName) {
+
+ OmKeyInfo.Builder builder = new OmKeyInfo.Builder();
+ builder.setParentObjectID(dirInfo.getParentObjectID());
+ builder.setKeyName(keyName);
+ builder.setAcls(dirInfo.getAcls());
+ builder.addAllMetadata(dirInfo.getMetadata());
+ builder.setVolumeName(volumeName);
+ builder.setBucketName(bucketName);
+ builder.setCreationTime(dirInfo.getCreationTime());
+ builder.setModificationTime(dirInfo.getModificationTime());
+ builder.setObjectID(dirInfo.getObjectID());
+ builder.setUpdateID(dirInfo.getUpdateID());
+ builder.setFileName(dirInfo.getName());
+ builder.setReplicationType(HddsProtos.ReplicationType.RATIS);
+ builder.setReplicationFactor(HddsProtos.ReplicationFactor.ONE);
+ builder.setOmKeyLocationInfos(Collections.singletonList(
+ new OmKeyLocationInfoGroup(0, new ArrayList<>())));
+ return builder.build();
+ }
+
+ /**
+ * Build DirectoryInfo from OmKeyInfo.
+ *
+ * @param keyInfo omKeyInfo
+ * @return omDirectoryInfo object
+ */
+ public static OmDirectoryInfo getDirectoryInfo(OmKeyInfo keyInfo){
+ OmDirectoryInfo.Builder builder = new OmDirectoryInfo.Builder();
+ builder.setParentObjectID(keyInfo.getParentObjectID());
+ builder.setAcls(keyInfo.getAcls());
+ builder.addAllMetadata(keyInfo.getMetadata());
+ builder.setCreationTime(keyInfo.getCreationTime());
+ builder.setModificationTime(keyInfo.getModificationTime());
+ builder.setObjectID(keyInfo.getObjectID());
+ builder.setUpdateID(keyInfo.getUpdateID());
+ builder.setName(OzoneFSUtils.getFileName(keyInfo.getKeyName()));
+ return builder.build();
+ }
+
+ /**
+ * Verify that the given toKey directory is NOT a sub-directory of the
+ * fromKey directory; throws IllegalArgumentException if it is.
+ *
+ * For example, special case of renaming a directory to its own
+ * sub-directory is not allowed.
+ *
+ * @param fromKeyName source path
+ * @param toKeyName destination path
+ */
+ public static void verifyToDirIsASubDirOfFromDirectory(String fromKeyName,
+ String toKeyName, boolean isDir){
+ if (!isDir) {
+ return;
+ }
+ Path dstParent = Paths.get(toKeyName).getParent();
+ while (dstParent != null && !Paths.get(fromKeyName).equals(dstParent)) {
+ dstParent = dstParent.getParent();
+ }
+ Preconditions.checkArgument(dstParent == null,
+ "Cannot rename a directory to its own subdirectory");
+ return;
+ }
+
+ /**
+ * Verify parent exists for the destination path and return destination
+ * path parent Id.
+ *
+ * Check whether dst parent dir exists or not. If the parent exists, then the
+ * source can be renamed to dst path.
+ *
+ * @param volumeName volume name
+ * @param bucketName bucket name
+ * @param toKeyName destination path
+ * @param fromKeyName source path
+ * @param metaMgr metadata manager
+ * @throws IOException if the destination parent dir does not exist.
+ */
+ public static long getToKeyNameParentId(String volumeName,
+ String bucketName, String toKeyName, String fromKeyName,
+ OMMetadataManager metaMgr) throws IOException {
+
+ int totalDirsCount = OzoneFSUtils.getFileCount(toKeyName);
+ // skip parent is root '/'
+ if (totalDirsCount <= 1) {
+ String bucketKey = metaMgr.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ metaMgr.getBucketTable().get(bucketKey);
+ return omBucketInfo.getObjectID();
+ }
+
+ String toKeyParentDir = OzoneFSUtils.getParentDir(toKeyName);
+
+ OzoneFileStatus toKeyParentDirStatus = getOMKeyInfoIfExists(metaMgr,
+ volumeName, bucketName, toKeyParentDir, 0);
+ // check if the immediate parent exists
+ if (toKeyParentDirStatus == null || !toKeyParentDirStatus.isDirectory()) {
+ throw new IOException(String.format(
+ "Failed to rename %s to %s, %s is a file", fromKeyName, toKeyName,
+ toKeyParentDir));
+ }
+ return toKeyParentDirStatus.getKeyInfo().getObjectID();
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
index 29d0243dea6..9980ccc0499 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java
@@ -228,6 +228,30 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
exception, getOmRequest().getUserInfo()));
+ processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
+ exception, omKeyInfo, result);
+
+ return omClientResponse;
+ }
+
+ /**
+ * Process result of om request execution.
+ *
+ * @param commitKeyRequest commit key request
+ * @param volumeName volume name
+ * @param bucketName bucket name
+ * @param keyName key name
+ * @param omMetrics om metrics
+ * @param exception exception trace
+ * @param omKeyInfo omKeyInfo
+ * @param result stores the result of the execution
+ */
+ @SuppressWarnings("parameternumber")
+ protected void processResult(CommitKeyRequest commitKeyRequest,
+ String volumeName, String bucketName,
+ String keyName, OMMetrics omMetrics,
+ IOException exception, OmKeyInfo omKeyInfo,
+ Result result) {
switch (result) {
case SUCCESS:
// As when we commit the key, then it is visible in ozone, so we should
@@ -239,18 +263,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
omMetrics.incNumKeys();
}
LOG.debug("Key committed. Volume:{}, Bucket:{}, Key:{}", volumeName,
- bucketName, keyName);
+ bucketName, keyName);
break;
case FAILURE:
- LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}.",
- volumeName, bucketName, keyName, exception);
+ LOG.error("Key commit failed. Volume:{}, Bucket:{}, Key:{}.",
+ volumeName, bucketName, keyName, exception);
omMetrics.incNumKeyCommitFails();
break;
default:
LOG.error("Unrecognized Result for OMKeyCommitRequest: {}",
- commitKeyRequest);
+ commitKeyRequest);
}
-
- return omClientResponse;
}
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
new file mode 100644
index 00000000000..e985a9aed7e
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java
@@ -0,0 +1,272 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles CommitKey request layout version V1.
+ */
+public class OMKeyCommitRequestV1 extends OMKeyCommitRequest {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(OMKeyCommitRequestV1.class);
+
+ public OMKeyCommitRequestV1(OMRequest omRequest) {
+ super(omRequest);
+ }
+
+ @Override
+ @SuppressWarnings("methodlength")
+ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+ long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+ CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest();
+
+ KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs();
+
+ String volumeName = commitKeyArgs.getVolumeName();
+ String bucketName = commitKeyArgs.getBucketName();
+ String keyName = commitKeyArgs.getKeyName();
+
+ OMMetrics omMetrics = ozoneManager.getMetrics();
+ omMetrics.incNumKeyCommits();
+
+ AuditLogger auditLogger = ozoneManager.getAuditLogger();
+
+ Map<String, String> auditMap = buildKeyArgsAuditMap(commitKeyArgs);
+
+ OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+ getOmRequest());
+
+ IOException exception = null;
+ OmKeyInfo omKeyInfo = null;
+ OmVolumeArgs omVolumeArgs = null;
+ OmBucketInfo omBucketInfo = null;
+ OMClientResponse omClientResponse = null;
+ boolean bucketLockAcquired = false;
+ Result result;
+
+ OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+
+ try {
+ commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap);
+ volumeName = commitKeyArgs.getVolumeName();
+ bucketName = commitKeyArgs.getBucketName();
+
+ // check Acl
+ checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName,
+ keyName, IAccessAuthorizer.ACLType.WRITE,
+ commitKeyRequest.getClientID());
+
+
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ Iterator<Path> pathComponents = Paths.get(keyName).iterator();
+ String dbOpenFileKey = null;
+
+ List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
+ for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) {
+ locationInfoList.add(OmKeyLocationInfo.getFromProtobuf(keyLocation));
+ }
+
+ bucketLockAcquired =
+ omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+ volumeName, bucketName);
+
+ validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey);
+ long bucketId = omBucketInfo.getObjectID();
+ long parentID = getParentID(bucketId, pathComponents, keyName,
+ omMetadataManager, ozoneManager);
+ String dbFileKey = omMetadataManager.getOzonePathKey(parentID, fileName);
+ dbOpenFileKey = omMetadataManager.getOpenFileName(parentID, fileName,
+ commitKeyRequest.getClientID());
+
+ omKeyInfo = OMFileRequest.getOmKeyInfoFromFileTable(true,
+ omMetadataManager, dbOpenFileKey, keyName);
+ if (omKeyInfo == null) {
+ throw new OMException("Failed to commit key, as " + dbOpenFileKey +
+ " entry is not found in the OpenKey table", KEY_NOT_FOUND);
+ }
+ omKeyInfo.setDataSize(commitKeyArgs.getDataSize());
+
+ omKeyInfo.setModificationTime(commitKeyArgs.getModificationTime());
+
+ // Update the block length for each block
+ omKeyInfo.updateLocationInfoList(locationInfoList);
+
+ // Set the UpdateID to current transactionLogIndex
+ omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+
+ // Add to cache of open key table and key table.
+ OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, dbFileKey,
+ null, fileName, trxnLogIndex);
+
+ OMFileRequest.addFileTableCacheEntry(omMetadataManager, dbFileKey,
+ omKeyInfo, fileName, trxnLogIndex);
+
+ long scmBlockSize = ozoneManager.getScmBlockSize();
+ int factor = omKeyInfo.getFactor().getNumber();
+ omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName);
+ // update usedBytes atomically.
+ // Block was pre-requested and UsedBytes updated when createKey and
+ // AllocatedBlock. The space occupied by the Key shall be based on
+ // the actual Key size, and the total Block size applied before should
+ // be subtracted.
+ long correctedSpace = omKeyInfo.getDataSize() * factor -
+ locationInfoList.size() * scmBlockSize * factor;
+ omVolumeArgs.getUsedBytes().add(correctedSpace);
+ omBucketInfo.getUsedBytes().add(correctedSpace);
+
+ omClientResponse = new OMKeyCommitResponseV1(omResponse.build(),
+ omKeyInfo, dbFileKey, dbOpenFileKey, omVolumeArgs, omBucketInfo);
+
+ result = Result.SUCCESS;
+ } catch (IOException ex) {
+ result = Result.FAILURE;
+ exception = ex;
+ omClientResponse = new OMKeyCommitResponseV1(createErrorOMResponse(
+ omResponse, exception));
+ } finally {
+ addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+ omDoubleBufferHelper);
+
+ if(bucketLockAcquired) {
+ omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+ bucketName);
+ }
+ }
+
+ auditLog(auditLogger, buildAuditMessage(OMAction.COMMIT_KEY, auditMap,
+ exception, getOmRequest().getUserInfo()));
+
+ processResult(commitKeyRequest, volumeName, bucketName, keyName, omMetrics,
+ exception, omKeyInfo, result);
+
+ return omClientResponse;
+ }
+
+
+ /**
+ * Check for directory exists with same name, if it exists throw error.
+ *
+ * @param keyName key name
+ * @param ozoneManager Ozone Manager
+ * @param reachedLastPathComponent true if the path component is a fileName
+ * @throws IOException if directory exists with same name
+ */
+ private void checkDirectoryAlreadyExists(String keyName,
+ OzoneManager ozoneManager,
+ boolean reachedLastPathComponent)
+ throws IOException {
+ // A directory exists at the leaf path; committing a file there is invalid.
+ if (reachedLastPathComponent && ozoneManager.getEnableFileSystemPaths()) {
+ throw new OMException("Can not create file: " + keyName +
+ " as there is already directory in the given path", NOT_A_FILE);
+ }
+ }
+
+ /**
+ * Get parent id for the user given path.
+ *
+ * @param bucketId bucket id
+ * @param pathComponents file path elements
+ * @param keyName user given key name
+ * @param omMetadataManager metadata manager
+ * @return lastKnownParentID
+ * @throws IOException DB failure or parent not exists in DirectoryTable
+ */
+ private long getParentID(long bucketId, Iterator<Path> pathComponents,
+ String keyName, OMMetadataManager omMetadataManager,
+ OzoneManager ozoneManager)
+ throws IOException {
+
+ long lastKnownParentId = bucketId;
+
+ // If no sub-dirs then bucketID is the root/parent.
+ if(!pathComponents.hasNext()){
+ return bucketId;
+ }
+
+ OmDirectoryInfo omDirectoryInfo;
+ while (pathComponents.hasNext()) {
+ String nodeName = pathComponents.next().toString();
+ boolean reachedLastPathComponent = !pathComponents.hasNext();
+ String dbNodeName =
+ omMetadataManager.getOzonePathKey(lastKnownParentId, nodeName);
+
+ omDirectoryInfo = omMetadataManager.
+ getDirectoryTable().get(dbNodeName);
+ if (omDirectoryInfo != null) {
+ checkDirectoryAlreadyExists(keyName, ozoneManager,
+ reachedLastPathComponent);
+ lastKnownParentId = omDirectoryInfo.getObjectID();
+ } else {
+ // One of the sub-dir doesn't exists in DB. Immediate parent should
+ // exists for committing the key, otherwise will fail the operation.
+ if (!reachedLastPathComponent) {
+ throw new OMException("Failed to commit key, as parent directory of "
+ + keyName + " entry is not found in DirectoryTable",
+ KEY_NOT_FOUND);
+ }
+ break;
+ }
+ }
+
+ return lastKnownParentId;
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
new file mode 100644
index 00000000000..91eafd804d1
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestV1.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import com.google.common.base.Optional;
+import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.OMAction;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponse;
+import org.apache.hadoop.ozone.om.response.key.OMKeyRenameResponseV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*;
+import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
+import org.apache.hadoop.ozone.security.acl.OzoneObj;
+import org.jetbrains.annotations.NotNull;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Map;
+
+import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND;
+import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
+
+/**
+ * Handles rename key request layout version V1.
+ */
+public class OMKeyRenameRequestV1 extends OMKeyRenameRequest {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(OMKeyRenameRequestV1.class);
+
+ public OMKeyRenameRequestV1(OMRequest omRequest) {
+ super(omRequest);
+ }
+
+ @Override
+ @SuppressWarnings("methodlength")
+ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+ long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+
+ RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest();
+ KeyArgs keyArgs = renameKeyRequest.getKeyArgs();
+ Map<String, String> auditMap = buildAuditMap(keyArgs, renameKeyRequest);
+
+ String volumeName = keyArgs.getVolumeName();
+ String bucketName = keyArgs.getBucketName();
+ String fromKeyName = keyArgs.getKeyName();
+ String toKeyName = renameKeyRequest.getToKeyName();
+
+ OMMetrics omMetrics = ozoneManager.getMetrics();
+ omMetrics.incNumKeyRenames();
+
+ AuditLogger auditLogger = ozoneManager.getAuditLogger();
+
+ OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
+ getOmRequest());
+
+ OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
+ boolean acquiredLock = false;
+ OMClientResponse omClientResponse = null;
+ IOException exception = null;
+ OmKeyInfo fromKeyValue;
+ String fromKey = null;
+ Result result;
+ try {
+ if (toKeyName.length() == 0 || fromKeyName.length() == 0) {
+ throw new OMException("Key name is empty",
+ OMException.ResultCodes.INVALID_KEY_NAME);
+ }
+
+ keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
+ volumeName = keyArgs.getVolumeName();
+ bucketName = keyArgs.getBucketName();
+
+ // check Acls to see if user has access to perform delete operation on
+ // old key and create operation on new key
+ checkKeyAcls(ozoneManager, volumeName, bucketName, fromKeyName,
+ IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY);
+ checkKeyAcls(ozoneManager, volumeName, bucketName, toKeyName,
+ IAccessAuthorizer.ACLType.CREATE, OzoneObj.ResourceType.KEY);
+
+ acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
+ volumeName, bucketName);
+
+ // Validate bucket and volume exists or not.
+ validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
+
+ // Check if toKey exists
+ OzoneFileStatus fromKeyFileStatus =
+ OMFileRequest.getOMKeyInfoIfExists(omMetadataManager, volumeName,
+ bucketName, fromKeyName, 0);
+ // fromKeyName should exist
+ if (fromKeyFileStatus == null) {
+ // TODO: Add support for renaming open key
+ throw new OMException("Key not found " + fromKeyName, KEY_NOT_FOUND);
+ }
+
+ // source exists
+ fromKeyValue = fromKeyFileStatus.getKeyInfo();
+ boolean isRenameDirectory = fromKeyFileStatus.isDirectory();
+
+ OzoneFileStatus toKeyFileStatus =
+ OMFileRequest.getOMKeyInfoIfExists(omMetadataManager,
+ volumeName, bucketName, toKeyName, 0);
+ OmKeyInfo toKeyValue;
+
+ // Destination exists cases:
+ if(toKeyFileStatus != null) {
+
+ toKeyValue = toKeyFileStatus.getKeyInfo();
+
+ if (fromKeyValue.getKeyName().equals(toKeyValue.getKeyName())) {
+ // case-1) src == dst: if dst is a file, treat the rename as a no-op.
+ // NOTE(review): omClientResponse stays null on this path — verify callers.
+ if (!toKeyFileStatus.isDirectory()) {
+ result = Result.SUCCESS;
+ } else {
+ throw new OMException("Key already exists " + toKeyName,
+ OMException.ResultCodes.KEY_ALREADY_EXISTS);
+ }
+ } else if (toKeyFileStatus.isDirectory()) {
+ // case-2) If dst is a directory, rename source as sub-path of it.
+ // For example: rename /source to /dst will lead to /dst/source
+ String fromFileName = OzoneFSUtils.getFileName(fromKeyName);
+ String newToKeyName = OzoneFSUtils.appendFileNameToKeyPath(toKeyName,
+ fromFileName);
+ OzoneFileStatus newToOzoneFileStatus =
+ OMFileRequest.getOMKeyInfoIfExists(omMetadataManager,
+ volumeName, bucketName, newToKeyName, 0);
+
+ if (newToOzoneFileStatus != null) {
+ // If new destination '/dst/source' exists.
+ throw new OMException(String.format(
+ "Failed to rename %s to %s, file already exists or not " +
+ "empty!", fromKeyName, newToKeyName),
+ OMException.ResultCodes.KEY_ALREADY_EXISTS);
+ }
+ omClientResponse = renameKey(toKeyValue.getObjectID(), trxnLogIndex,
+ fromKeyValue, isRenameDirectory, newToKeyName,
+ keyArgs.getModificationTime(), omResponse, ozoneManager);
+ result = Result.SUCCESS;
+ } else {
+ // case-3) destination is a file and should not exist
+ throw new OMException("Failed to rename, key already exists "
+ + toKeyName, OMException.ResultCodes.KEY_ALREADY_EXISTS);
+ }
+ } else {
+ // Destination doesn't exists cases:
+
+ // Cannot rename a directory to its own subdirectory
+ OMFileRequest.verifyToDirIsASubDirOfFromDirectory(fromKeyName,
+ toKeyName, fromKeyFileStatus.isDirectory());
+
+ // Destination doesn't exist, check whether dst parent dir exists or not
+ // if the parent exists, the source can still be renamed to dst path
+ long toKeyParentId = OMFileRequest.getToKeyNameParentId(volumeName,
+ bucketName, toKeyName, fromKeyName, omMetadataManager);
+
+ omClientResponse = renameKey(toKeyParentId, trxnLogIndex,
+ fromKeyValue, isRenameDirectory, toKeyName,
+ keyArgs.getModificationTime(), omResponse, ozoneManager);
+
+ result = Result.SUCCESS;
+ }
+ } catch (IOException ex) {
+ result = Result.FAILURE;
+ exception = ex;
+ omClientResponse = new OMKeyRenameResponse(createErrorOMResponse(
+ omResponse, exception));
+ } finally {
+ addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
+ omDoubleBufferHelper);
+ if (acquiredLock) {
+ omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
+ bucketName);
+ }
+ }
+
+ auditLog(auditLogger, buildAuditMessage(OMAction.RENAME_KEY, auditMap,
+ exception, getOmRequest().getUserInfo()));
+
+ switch (result) {
+ case SUCCESS:
+ LOG.debug("Rename Key is successfully completed for volume:{} bucket:{}" +
+ " fromKey:{} toKey:{}. ", volumeName, bucketName,
+ fromKeyName, toKeyName);
+ break;
+ case FAILURE:
+ ozoneManager.getMetrics().incNumKeyRenameFails();
+ LOG.error("Rename key failed for volume:{} bucket:{} fromKey:{} " +
+ "toKey:{}. Key: {} not found.", volumeName, bucketName,
+ fromKeyName, toKeyName, fromKeyName);
+ break;
+ default:
+ LOG.error("Unrecognized Result for OMKeyRenameRequest: {}",
+ renameKeyRequest);
+ }
+ return omClientResponse;
+ }
+
+ @SuppressWarnings("parameternumber")
+ private OMClientResponse renameKey(long toKeyParentId,
+ long trxnLogIndex, OmKeyInfo fromKeyValue,
+ boolean isRenameDirectory, String toKeyName,
+ long modificationTime, OMResponse.Builder omResponse,
+ OzoneManager ozoneManager) {
+
+ String dbFromKey = fromKeyValue.getPath();
+ String toKeyFileName = OzoneFSUtils.getFileName(toKeyName);
+
+ fromKeyValue.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
+ // Set toFileName
+ fromKeyValue.setKeyName(toKeyFileName);
+ fromKeyValue.setFileName(toKeyFileName);
+ // Set toKeyObjectId
+ fromKeyValue.setParentObjectID(toKeyParentId);
+ //Set modification time
+ fromKeyValue.setModificationTime(modificationTime);
+
+ // destination dbKeyName
+ String dbToKey = fromKeyValue.getPath();
+
+ // Update the table cache:
+ // dbFromKey should be deleted and dbToKey should be added with the
+ // newly updated omKeyInfo, so the rename is visible through the cache
+ // for both the from-key and the to-key entries.
+ OMMetadataManager metadataMgr = ozoneManager.getMetadataManager();
+ if (isRenameDirectory) {
+ Table dirTable = metadataMgr.getDirectoryTable();
+ dirTable.addCacheEntry(new CacheKey<>(dbFromKey),
+ new CacheValue<>(Optional.absent(), trxnLogIndex));
+
+ dirTable.addCacheEntry(new CacheKey<>(dbToKey),
+ new CacheValue<>(Optional.of(OMFileRequest.
+ getDirectoryInfo(fromKeyValue)), trxnLogIndex));
+ } else {
+ Table keyTable = metadataMgr.getKeyTable();
+
+ keyTable.addCacheEntry(new CacheKey<>(dbFromKey),
+ new CacheValue<>(Optional.absent(), trxnLogIndex));
+
+ keyTable.addCacheEntry(new CacheKey<>(dbToKey),
+ new CacheValue<>(Optional.of(fromKeyValue), trxnLogIndex));
+ }
+
+ OMClientResponse omClientResponse = new OMKeyRenameResponseV1(omResponse
+ .setRenameKeyResponse(RenameKeyResponse.newBuilder()).build(),
+ dbFromKey, dbToKey, fromKeyValue, isRenameDirectory);
+ return omClientResponse;
+ }
+
+ private Map buildAuditMap(
+ KeyArgs keyArgs, RenameKeyRequest renameKeyRequest) {
+ Map auditMap = buildKeyArgsAuditMap(keyArgs);
+ auditMap.remove(OzoneConsts.KEY);
+ auditMap.put(OzoneConsts.SRC_KEY, keyArgs.getKeyName());
+ auditMap.put(OzoneConsts.DST_KEY, renameKeyRequest.getToKeyName());
+ return auditMap;
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
index a048533001e..8abe7d9edd5 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java
@@ -238,40 +238,6 @@ public EncryptedKeyVersion run() throws IOException {
return edek;
}
- /**
- * Create OmKeyInfo object.
- * @return OmKeyInfo
- */
- @SuppressWarnings("parameterNumber")
- protected OmKeyInfo createKeyInfo(@Nonnull KeyArgs keyArgs,
- @Nonnull List locations,
- @Nonnull HddsProtos.ReplicationFactor factor,
- @Nonnull HddsProtos.ReplicationType type, long size,
- @Nullable FileEncryptionInfo encInfo,
- @Nonnull PrefixManager prefixManager,
- @Nullable OmBucketInfo omBucketInfo,
- long transactionLogIndex) {
- long objectID = OMFileRequest.getObjIDFromTxId(transactionLogIndex);
-
- return new OmKeyInfo.Builder()
- .setVolumeName(keyArgs.getVolumeName())
- .setBucketName(keyArgs.getBucketName())
- .setKeyName(keyArgs.getKeyName())
- .setOmKeyLocationInfos(Collections.singletonList(
- new OmKeyLocationInfoGroup(0, locations)))
- .setCreationTime(keyArgs.getModificationTime())
- .setModificationTime(keyArgs.getModificationTime())
- .setDataSize(size)
- .setReplicationType(type)
- .setReplicationFactor(factor)
- .setFileEncryptionInfo(encInfo)
- .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager))
- .addAllMetadata(KeyValueUtil.getFromProtobuf(keyArgs.getMetadataList()))
- .setObjectID(objectID)
- .setUpdateID(transactionLogIndex)
- .build();
- }
-
private List< OzoneAcl > getAclsForKey(KeyArgs keyArgs,
OmBucketInfo bucketInfo, PrefixManager prefixManager) {
List acls = new ArrayList<>();
@@ -325,76 +291,10 @@ protected OmKeyInfo prepareKeyInfo(
@Nullable OmBucketInfo omBucketInfo,
long transactionLogIndex, boolean isRatisEnabled)
throws IOException {
- if (keyArgs.getIsMultipartKey()) {
- return prepareMultipartKeyInfo(omMetadataManager, keyArgs,
- size, locations, encInfo, prefixManager, omBucketInfo,
- transactionLogIndex);
- //TODO args.getMetadata
- }
- if (dbKeyInfo != null) {
- // TODO: Need to be fixed, as when key already exists, we are
- // appending new blocks to existing key.
- // The key already exist, the new blocks will be added as new version
- // when locations.size = 0, the new version will have identical blocks
- // as its previous version
- dbKeyInfo.addNewVersion(locations, false);
- dbKeyInfo.setDataSize(size + dbKeyInfo.getDataSize());
- // The modification time is set in preExecute. Use the same
- // modification time.
- dbKeyInfo.setModificationTime(keyArgs.getModificationTime());
- dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled);
- return dbKeyInfo;
- }
- // the key does not exist, create a new object.
- // Blocks will be appended as version 0.
- return createKeyInfo(keyArgs, locations, keyArgs.getFactor(),
- keyArgs.getType(), keyArgs.getDataSize(), encInfo, prefixManager,
- omBucketInfo, transactionLogIndex);
- }
-
- /**
- * Prepare OmKeyInfo for multi-part upload part key which will be persisted
- * to openKeyTable.
- * @return OmKeyInfo
- * @throws IOException
- */
- @SuppressWarnings("parameternumber")
- private OmKeyInfo prepareMultipartKeyInfo(
- @Nonnull OMMetadataManager omMetadataManager,
- @Nonnull KeyArgs args, long size,
- @Nonnull List locations,
- FileEncryptionInfo encInfo, @Nonnull PrefixManager prefixManager,
- @Nullable OmBucketInfo omBucketInfo, @Nonnull long transactionLogIndex)
- throws IOException {
- HddsProtos.ReplicationFactor factor;
- HddsProtos.ReplicationType type;
-
- Preconditions.checkArgument(args.getMultipartNumber() > 0,
- "PartNumber Should be greater than zero");
- // When key is multipart upload part key, we should take replication
- // type and replication factor from original key which has done
- // initiate multipart upload. If we have not found any such, we throw
- // error no such multipart upload.
- String uploadID = args.getMultipartUploadID();
- Preconditions.checkNotNull(uploadID);
- String multipartKey = omMetadataManager
- .getMultipartKey(args.getVolumeName(), args.getBucketName(),
- args.getKeyName(), uploadID);
- OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
- multipartKey);
- if (partKeyInfo == null) {
- throw new OMException("No such Multipart upload is with specified " +
- "uploadId " + uploadID,
- OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
- } else {
- factor = partKeyInfo.getFactor();
- type = partKeyInfo.getType();
- }
- // For this upload part we don't need to check in KeyTable. As this
- // is not an actual key, it is a part of the key.
- return createKeyInfo(args, locations, factor, type, size, encInfo,
- prefixManager, omBucketInfo, transactionLogIndex);
+ return prepareFileInfo(omMetadataManager, keyArgs, dbKeyInfo, size,
+ locations, encInfo, prefixManager, omBucketInfo, null,
+ transactionLogIndex, isRatisEnabled);
}
/**
@@ -415,7 +315,6 @@ protected void checkBucketAcls(OzoneManager ozoneManager, String volume,
}
}
-
/**
* Check Acls for the ozone key.
* @param ozoneManager
@@ -650,4 +549,139 @@ protected OmBucketInfo getBucketInfo(OMMetadataManager omMetadataManager,
new CacheKey<>(omMetadataManager.getBucketKey(volume, bucket)))
.getCacheValue();
}
+
+ /**
+ * Prepare OmKeyInfo which will be persisted to openKeyTable.
+ * @return OmKeyInfo
+ * @throws IOException
+ */
+ @SuppressWarnings("parameternumber")
+ protected OmKeyInfo prepareFileInfo(
+ @Nonnull OMMetadataManager omMetadataManager,
+ @Nonnull KeyArgs keyArgs, OmKeyInfo dbKeyInfo, long size,
+ @Nonnull List locations,
+ @Nullable FileEncryptionInfo encInfo,
+ @Nonnull PrefixManager prefixManager,
+ @Nullable OmBucketInfo omBucketInfo,
+ OMFileRequest.OMPathInfoV1 omPathInfo,
+ long transactionLogIndex, boolean isRatisEnabled)
+ throws IOException {
+ if (keyArgs.getIsMultipartKey()) {
+ return prepareMultipartFileInfo(omMetadataManager, keyArgs,
+ size, locations, encInfo, prefixManager, omBucketInfo,
+ omPathInfo, transactionLogIndex);
+ //TODO args.getMetadata
+ }
+ if (dbKeyInfo != null) {
+ // TODO: Need to be fixed, as when key already exists, we are
+ // appending new blocks to existing key.
+ // The key already exist, the new blocks will be added as new version
+ // when locations.size = 0, the new version will have identical blocks
+ // as its previous version
+ dbKeyInfo.addNewVersion(locations, false);
+ dbKeyInfo.setDataSize(size + dbKeyInfo.getDataSize());
+ // The modification time is set in preExecute. Use the same
+ // modification time.
+ dbKeyInfo.setModificationTime(keyArgs.getModificationTime());
+ dbKeyInfo.setUpdateID(transactionLogIndex, isRatisEnabled);
+ return dbKeyInfo;
+ }
+
+ // the key does not exist, create a new object.
+ // Blocks will be appended as version 0.
+ return createFileInfo(keyArgs, locations, keyArgs.getFactor(),
+ keyArgs.getType(), keyArgs.getDataSize(), encInfo, prefixManager,
+ omBucketInfo, omPathInfo, transactionLogIndex);
+ }
+
+ /**
+ * Create OmKeyInfo object.
+ * @return OmKeyInfo
+ */
+ @SuppressWarnings("parameterNumber")
+ protected OmKeyInfo createFileInfo(@Nonnull KeyArgs keyArgs,
+ @Nonnull List locations,
+ @Nonnull HddsProtos.ReplicationFactor factor,
+ @Nonnull HddsProtos.ReplicationType type, long size,
+ @Nullable FileEncryptionInfo encInfo,
+ @Nonnull PrefixManager prefixManager,
+ @Nullable OmBucketInfo omBucketInfo,
+ OMFileRequest.OMPathInfoV1 omPathInfo,
+ long transactionLogIndex) {
+
+ OmKeyInfo.Builder builder = new OmKeyInfo.Builder();
+ builder.setVolumeName(keyArgs.getVolumeName())
+ .setBucketName(keyArgs.getBucketName())
+ .setKeyName(keyArgs.getKeyName())
+ .setOmKeyLocationInfos(Collections.singletonList(
+ new OmKeyLocationInfoGroup(0, locations)))
+ .setCreationTime(keyArgs.getModificationTime())
+ .setModificationTime(keyArgs.getModificationTime())
+ .setDataSize(size)
+ .setReplicationType(type)
+ .setReplicationFactor(factor)
+ .setFileEncryptionInfo(encInfo)
+ .setAcls(getAclsForKey(keyArgs, omBucketInfo, prefixManager))
+ .addAllMetadata(KeyValueUtil.getFromProtobuf(
+ keyArgs.getMetadataList()))
+ .setUpdateID(transactionLogIndex);
+ long objectID;
+ if (omPathInfo == null) {
+ // KeyTable metadata format
+ objectID = OMFileRequest.getObjIDFromTxId(transactionLogIndex);
+ } else {
+ // FileTable metadata format
+ objectID = omPathInfo.getLeafNodeObjectId();
+ builder.setParentObjectID(omPathInfo.getLastKnownParentId());
+ builder.setFileName(omPathInfo.getLeafNodeName());
+ }
+ builder.setObjectID(objectID);
+ return builder.build();
+ }
+
+ /**
+ * Prepare OmKeyInfo for multi-part upload part key which will be persisted
+ * to openKeyTable.
+ * @return OmKeyInfo
+ * @throws IOException
+ */
+ @SuppressWarnings("parameternumber")
+ private OmKeyInfo prepareMultipartFileInfo(
+ @Nonnull OMMetadataManager omMetadataManager,
+ @Nonnull KeyArgs args, long size,
+ @Nonnull List locations,
+ FileEncryptionInfo encInfo, @Nonnull PrefixManager prefixManager,
+ @Nullable OmBucketInfo omBucketInfo,
+ OMFileRequest.OMPathInfoV1 omPathInfo,
+ @Nonnull long transactionLogIndex)
+ throws IOException {
+ HddsProtos.ReplicationFactor factor;
+ HddsProtos.ReplicationType type;
+
+ Preconditions.checkArgument(args.getMultipartNumber() > 0,
+ "PartNumber Should be greater than zero");
+ // When key is multipart upload part key, we should take replication
+ // type and replication factor from original key which has done
+ // initiate multipart upload. If we have not found any such, we throw
+ // error no such multipart upload.
+ String uploadID = args.getMultipartUploadID();
+ Preconditions.checkNotNull(uploadID);
+ String multipartKey = omMetadataManager
+ .getMultipartKey(args.getVolumeName(), args.getBucketName(),
+ args.getKeyName(), uploadID);
+ OmKeyInfo partKeyInfo = omMetadataManager.getOpenKeyTable().get(
+ multipartKey);
+ if (partKeyInfo == null) {
+ throw new OMException("No such Multipart upload is with specified " +
+ "uploadId " + uploadID,
+ OMException.ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR);
+ } else {
+ factor = partKeyInfo.getFactor();
+ type = partKeyInfo.getType();
+ }
+ // For this upload part we don't need to check in KeyTable. As this
+ // is not an actual key, it is a part of the key.
+ return createFileInfo(args, locations, factor, type, size, encInfo,
+ prefixManager, omBucketInfo, omPathInfo, transactionLogIndex);
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java
new file mode 100644
index 00000000000..4e93fa734c3
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMDirectoryCreateResponseV1.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequest.Result;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+
+/**
+ * Response for create directory request.
+ */
+@CleanupTableInfo(cleanupTables = {DIRECTORY_TABLE})
+public class OMDirectoryCreateResponseV1 extends OMClientResponse {
+
+ public static final Logger LOG =
+ LoggerFactory.getLogger(OMDirectoryCreateResponseV1.class);
+
+ private OmDirectoryInfo dirInfo;
+ private List parentDirInfos;
+ private Result result;
+
+ public OMDirectoryCreateResponseV1(@Nonnull OMResponse omResponse,
+ @Nonnull OmDirectoryInfo dirInfo,
+ @Nonnull List pDirInfos,
+ @Nonnull Result result) {
+ super(omResponse);
+ this.dirInfo = dirInfo;
+ this.parentDirInfos = pDirInfos;
+ this.result = result;
+ }
+
+ /**
+ * For when the request is not successful or the directory already exists.
+ */
+ public OMDirectoryCreateResponseV1(@Nonnull OMResponse omResponse,
+ @Nonnull Result result) {
+ super(omResponse);
+ this.result = result;
+ }
+
+ @Override
+ protected void addToDBBatch(OMMetadataManager omMetadataManager,
+ BatchOperation batchOperation)
+ throws IOException {
+ addToDirectoryTable(omMetadataManager, batchOperation);
+ }
+
+ private void addToDirectoryTable(OMMetadataManager omMetadataManager,
+ BatchOperation batchOperation)
+ throws IOException {
+ if (dirInfo != null) {
+ if (parentDirInfos != null) {
+ for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+ String parentKey = omMetadataManager
+ .getOzonePathKey(parentDirInfo.getParentObjectID(),
+ parentDirInfo.getName());
+ LOG.debug("putWithBatch parent : dir {} info : {}", parentKey,
+ parentDirInfo);
+ omMetadataManager.getDirectoryTable()
+ .putWithBatch(batchOperation, parentKey, parentDirInfo);
+ }
+ }
+
+ String dirKey = omMetadataManager.getOzonePathKey(
+ dirInfo.getParentObjectID(), dirInfo.getName());
+ omMetadataManager.getDirectoryTable().putWithBatch(batchOperation, dirKey,
+ dirInfo);
+ } else {
+ // The directory already exists, so there is nothing to persist here.
+ // This is not an error; dirInfo is simply null in this case.
+ LOG.debug("Response Status is OK, dirKeyInfo is null in " +
+ "OMDirectoryCreateResponseV1");
+ }
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
new file mode 100644
index 00000000000..a168d8fb983
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java
@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+
+/**
+ * Response for create file request layout version V1.
+ */
+@CleanupTableInfo(cleanupTables = OPEN_FILE_TABLE)
+public class OMFileCreateResponseV1 extends OMFileCreateResponse {
+
+ private List parentDirInfos;
+
+ public OMFileCreateResponseV1(@Nonnull OMResponse omResponse,
+ @Nonnull OmKeyInfo omKeyInfo,
+ @Nonnull List parentDirInfos,
+ long openKeySessionID,
+ @Nonnull OmVolumeArgs omVolumeArgs,
+ @Nonnull OmBucketInfo omBucketInfo) {
+ super(omResponse, omKeyInfo, new ArrayList<>(), openKeySessionID,
+ omVolumeArgs, omBucketInfo);
+ this.parentDirInfos = parentDirInfos;
+ }
+
+ @Override
+ public void addToDBBatch(OMMetadataManager omMetadataMgr,
+ BatchOperation batchOp) throws IOException {
+
+ /*
+ * Create parent directory entries during Key Create - do not wait
+ * for the Key Commit request.
+ * TODO: handle stale directory entries.
+ */
+ if (parentDirInfos != null) {
+ for (OmDirectoryInfo parentDirInfo : parentDirInfos) {
+ String parentKey = parentDirInfo.getPath();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("putWithBatch adding parent : key {} info : {}", parentKey,
+ parentDirInfo);
+ }
+ omMetadataMgr.getDirectoryTable().putWithBatch(batchOp, parentKey,
+ parentDirInfo);
+ }
+ }
+
+ OMFileRequest.addToOpenFileTable(omMetadataMgr, batchOp, getOmKeyInfo(),
+ getOpenKeySessionID());
+
+ // update volume usedBytes.
+ omMetadataMgr.getVolumeTable().putWithBatch(batchOp,
+ omMetadataMgr.getVolumeKey(getOmVolumeArgs().getVolume()),
+ getOmVolumeArgs());
+ // update bucket usedBytes.
+ omMetadataMgr.getBucketTable().putWithBatch(batchOp,
+ omMetadataMgr.getBucketKey(getOmVolumeArgs().getVolume(),
+ getOmBucketInfo().getBucketName()), getOmBucketInfo());
+ }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
index aede2ec18e9..66619008aa7 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java
@@ -86,4 +86,23 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
omBucketInfo.getBucketName()), omBucketInfo);
}
+ protected String getOpenKeyName() {
+ return openKeyName;
+ }
+
+ protected OmKeyInfo getOmKeyInfo() {
+ return omKeyInfo;
+ }
+
+ protected OmVolumeArgs getOmVolumeArgs() {
+ return omVolumeArgs;
+ }
+
+ protected OmBucketInfo getOmBucketInfo() {
+ return omBucketInfo;
+ }
+
+ protected String getOzoneKeyName() {
+ return ozoneKeyName;
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
new file mode 100644
index 00000000000..bff55545ba4
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponseV1.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_FILE_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.FILE_TABLE;
+
+/**
+ * Response for CommitKey request layout version V1.
+ */
+@CleanupTableInfo(cleanupTables = {OPEN_FILE_TABLE, FILE_TABLE})
+public class OMKeyCommitResponseV1 extends OMKeyCommitResponse {
+
+ public OMKeyCommitResponseV1(@Nonnull OMResponse omResponse,
+ @Nonnull OmKeyInfo omKeyInfo,
+ String ozoneKeyName, String openKeyName,
+ @Nonnull OmVolumeArgs omVolumeArgs,
+ @Nonnull OmBucketInfo omBucketInfo) {
+ super(omResponse, omKeyInfo, ozoneKeyName, openKeyName, omVolumeArgs,
+ omBucketInfo);
+ }
+
+ /**
+ * For when the request is not successful.
+ * For a successful request, the other constructor should be used.
+ */
+ public OMKeyCommitResponseV1(@Nonnull OMResponse omResponse) {
+ super(omResponse);
+ checkStatusNotOK();
+ }
+
+ @Override
+ public void addToDBBatch(OMMetadataManager omMetadataManager,
+ BatchOperation batchOperation) throws IOException {
+
+ // Delete from OpenKey table
+ omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation,
+ getOpenKeyName());
+
+ OMFileRequest.addToFileTable(omMetadataManager, batchOperation,
+ getOmKeyInfo());
+
+ // update volume usedBytes.
+ omMetadataManager.getVolumeTable().putWithBatch(batchOperation,
+ omMetadataManager.getVolumeKey(getOmVolumeArgs().getVolume()),
+ getOmVolumeArgs());
+ // update bucket usedBytes.
+ omMetadataManager.getBucketTable().putWithBatch(batchOperation,
+ omMetadataManager.getBucketKey(getOmVolumeArgs().getVolume(),
+ getOmBucketInfo().getBucketName()), getOmBucketInfo());
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
index 2ae53591849..49ff8d9a580 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java
@@ -72,7 +72,7 @@ public OMKeyCreateResponse(@Nonnull OMResponse omResponse) {
}
@Override
- protected void addToDBBatch(OMMetadataManager omMetadataManager,
+ public void addToDBBatch(OMMetadataManager omMetadataManager,
BatchOperation batchOperation) throws IOException {
/**
@@ -108,5 +108,21 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager,
omMetadataManager.getBucketKey(omVolumeArgs.getVolume(),
omBucketInfo.getBucketName()), omBucketInfo);
}
+
+ protected long getOpenKeySessionID() {
+ return openKeySessionID;
+ }
+
+ protected OmKeyInfo getOmKeyInfo() {
+ return omKeyInfo;
+ }
+
+ protected OmVolumeArgs getOmVolumeArgs() {
+ return omVolumeArgs;
+ }
+
+ protected OmBucketInfo getOmBucketInfo() {
+ return omBucketInfo;
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
index 7470b378844..3b7edf13c50 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponse.java
@@ -70,4 +70,15 @@ public void addToDBBatch(OMMetadataManager omMetadataManager,
renameKeyInfo);
}
+ public OmKeyInfo getRenameKeyInfo() {
+ return renameKeyInfo;
+ }
+
+ public String getFromKeyName() {
+ return fromKeyName;
+ }
+
+ public String getToKeyName() {
+ return toKeyName;
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java
new file mode 100644
index 00000000000..f8fb88d1e4b
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyRenameResponseV1.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
+import org.apache.hadoop.ozone.om.response.CleanupTableInfo;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+
+import javax.annotation.Nonnull;
+import java.io.IOException;
+
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DIRECTORY_TABLE;
+import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.KEY_TABLE;
+
+/**
+ * Response for RenameKey request layout version V1.
+ */
+@CleanupTableInfo(cleanupTables = {KEY_TABLE, DIRECTORY_TABLE})
+public class OMKeyRenameResponseV1 extends OMKeyRenameResponse {
+
+ private boolean isRenameDirectory;
+
+ public OMKeyRenameResponseV1(@Nonnull OMResponse omResponse,
+ String fromKeyName, String toKeyName, @Nonnull OmKeyInfo renameKeyInfo,
+ boolean isRenameDirectory) {
+ super(omResponse, fromKeyName, toKeyName, renameKeyInfo);
+ this.isRenameDirectory = isRenameDirectory;
+ }
+
+ @Override
+ public void addToDBBatch(OMMetadataManager omMetadataManager,
+ BatchOperation batchOperation) throws IOException {
+
+ if (isRenameDirectory) {
+ omMetadataManager.getDirectoryTable().deleteWithBatch(batchOperation,
+ getFromKeyName());
+
+ OmDirectoryInfo renameDirInfo =
+ OMFileRequest.getDirectoryInfo(getRenameKeyInfo());
+ omMetadataManager.getDirectoryTable().putWithBatch(batchOperation,
+ getToKeyName(), renameDirInfo);
+
+ } else {
+ omMetadataManager.getKeyTable().deleteWithBatch(batchOperation,
+ getFromKeyName());
+ omMetadataManager.getKeyTable().putWithBatch(batchOperation,
+ getToKeyName(), getRenameKeyInfo());
+ }
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index 1be303c50a4..d5f3c325aad 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -38,6 +38,8 @@
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
@@ -65,6 +67,7 @@
import org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType;
import org.apache.hadoop.ozone.security.acl.OzoneObjInfo;
+import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
@@ -229,6 +232,25 @@ public static void addKeyLocationInfo(
keyInfo.appendNewBlocks(Collections.singletonList(locationInfo), false);
}
+ /**
+ * Add dir key entry to DirectoryTable, optionally populating the table
+ * cache as well. The entry's own path is used as the DB key.
+ *
+ * @param addToCache whether to also insert the entry into the table cache
+ * @param omDirInfo directory info to persist
+ * @param trxnLogIndex transaction log index used as the cache epoch
+ * @param omMetadataManager metadata manager backing the DirectoryTable
+ * @throws Exception DB failure
+ */
+ public static void addDirKeyToDirTable(boolean addToCache,
+ OmDirectoryInfo omDirInfo,
+ long trxnLogIndex,
+ OMMetadataManager omMetadataManager)
+ throws Exception {
+ String ozoneKey = omDirInfo.getPath();
+ if (addToCache) {
+ omMetadataManager.getDirectoryTable().addCacheEntry(
+ new CacheKey<>(ozoneKey),
+ new CacheValue<>(Optional.of(omDirInfo), trxnLogIndex));
+ }
+ omMetadataManager.getDirectoryTable().put(ozoneKey, omDirInfo);
+ }
+
/**
* Create OmKeyInfo.
*/
@@ -239,6 +261,22 @@ public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
replicationFactor, 0L);
}
+ /**
+ * Create OmDirectoryInfo with creation/modification time set to now and
+ * the update ID initialized to the object ID.
+ *
+ * @param keyName directory name (single path component)
+ * @param objectID object ID to assign to this directory
+ * @param parentObjID object ID of the parent directory (or bucket)
+ * @return the built OmDirectoryInfo
+ */
+ public static OmDirectoryInfo createOmDirectoryInfo(String keyName,
+ long objectID,
+ long parentObjID) {
+ return new OmDirectoryInfo.Builder()
+ .setName(keyName)
+ .setCreationTime(Time.now())
+ .setModificationTime(Time.now())
+ .setObjectID(objectID)
+ .setParentObjectID(parentObjID)
+ .setUpdateID(objectID)
+ .build();
+ }
+
/**
* Create OmKeyInfo.
*/
@@ -713,4 +751,113 @@ public static void addVolumeToOM(OMMetadataManager omMetadataManager,
new CacheKey<>(dbVolumeKey),
new CacheValue<>(Optional.of(omVolumeArgs), 1L));
}
+
+ /**
+ * Create OmKeyInfo carrying the layout-V1 fields (parent object ID and
+ * file name derived from the key name) in addition to the usual key
+ * metadata. Data size is fixed at 1000 bytes and an empty location group
+ * is attached.
+ *
+ * @param volumeName volume name
+ * @param bucketName bucket name
+ * @param keyName full key name; its last component becomes the file name
+ * @param replicationType replication type
+ * @param replicationFactor replication factor
+ * @param objectID object ID to assign to the key
+ * @param parentID object ID of the immediate parent directory
+ * @param trxnLogIndex transaction log index, stored as the update ID
+ * @param creationTime creation time to record
+ * @return the built OmKeyInfo
+ */
+ @SuppressWarnings("parameterNumber")
+ public static OmKeyInfo createOmKeyInfo(String volumeName, String bucketName,
+ String keyName, HddsProtos.ReplicationType replicationType,
+ HddsProtos.ReplicationFactor replicationFactor, long objectID,
+ long parentID, long trxnLogIndex, long creationTime) {
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ return new OmKeyInfo.Builder()
+ .setVolumeName(volumeName)
+ .setBucketName(bucketName)
+ .setKeyName(keyName)
+ .setOmKeyLocationInfos(Collections.singletonList(
+ new OmKeyLocationInfoGroup(0, new ArrayList<>())))
+ .setCreationTime(creationTime)
+ .setModificationTime(Time.now())
+ .setDataSize(1000L)
+ .setReplicationType(replicationType)
+ .setReplicationFactor(replicationFactor)
+ .setObjectID(objectID)
+ .setUpdateID(trxnLogIndex)
+ .setParentObjectID(parentID)
+ .setFileName(fileName)
+ .build();
+ }
+
+
+ /**
+ * Add a file entry to the key table. If the openKeyTable flag is true,
+ * adds the entry to the OpenKeyTable (keyed by parent ID, file name and
+ * client ID), else adds it to the KeyTable (keyed by parent ID and file
+ * name). Optionally populates the corresponding table cache as well.
+ *
+ * @param openKeyTable true to target OpenKeyTable, false for KeyTable
+ * @param addToCache whether to also insert into the table cache
+ * @param fileName file name (leaf path component)
+ * @param omKeyInfo key info to persist; supplies the parent object ID
+ * @param clientID client ID, used only for the OpenKeyTable key
+ * @param trxnLogIndex transaction log index used as the cache epoch
+ * @param omMetadataManager metadata manager backing the tables
+ * @throws Exception DB failure
+ */
+ public static void addFileToKeyTable(boolean openKeyTable,
+ boolean addToCache, String fileName,
+ OmKeyInfo omKeyInfo,
+ long clientID, long trxnLogIndex,
+ OMMetadataManager omMetadataManager)
+ throws Exception {
+ if (openKeyTable) {
+ String ozoneKey = omMetadataManager.getOpenFileName(
+ omKeyInfo.getParentObjectID(), fileName, clientID);
+ if (addToCache) {
+ omMetadataManager.getOpenKeyTable().addCacheEntry(
+ new CacheKey<>(ozoneKey),
+ new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+ }
+ omMetadataManager.getOpenKeyTable().put(ozoneKey, omKeyInfo);
+ } else {
+ String ozoneKey = omMetadataManager.getOzonePathKey(
+ omKeyInfo.getParentObjectID(), fileName);
+ if (addToCache) {
+ omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
+ new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex));
+ }
+ omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+ }
+ }
+
+ /**
+ * Gets bucketId from OM metadata manager.
+ *
+ * Note: assumes the bucket already exists; a missing bucket results in a
+ * NullPointerException since the lookup result is dereferenced directly.
+ *
+ * @param volumeName volume name
+ * @param bucketName bucket name
+ * @param omMetadataManager metadata manager
+ * @return bucket Id
+ * @throws Exception DB failure
+ */
+ public static long getBucketId(String volumeName, String bucketName,
+ OMMetadataManager omMetadataManager)
+ throws Exception {
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+ return omBucketInfo.getObjectID();
+ }
+
+ /**
+ * Add path components to the directory table and returns last directory's
+ * object id. Each component is parented to the previous one, starting
+ * from the bucket; entries are added to both the table and its cache.
+ *
+ * @param volumeName volume name
+ * @param bucketName bucket name
+ * @param key key name, components separated by '/'
+ * @param omMetaMgr metadata manager
+ * @return last directory object id
+ * @throws Exception DB failure
+ */
+ public static long addParentsToDirTable(String volumeName, String bucketName,
+ String key, OMMetadataManager omMetaMgr)
+ throws Exception {
+ long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+ omMetaMgr);
+ String[] pathComponents = StringUtils.split(key, '/');
+ // Arbitrary test offsets: object ids start at bucketId + 10 so they do
+ // not collide with the bucket's own object id.
+ long objectId = bucketId + 10;
+ long parentId = bucketId;
+ long txnID = 50;
+ for (String pathElement : pathComponents) {
+ OmDirectoryInfo omDirInfo =
+ TestOMRequestUtils.createOmDirectoryInfo(pathElement, ++objectId,
+ parentId);
+ TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+ txnID, omMetaMgr);
+ parentId = omDirInfo.getObjectID();
+ }
+ return parentId;
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
new file mode 100644
index 00000000000..f0f0320e985
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequestV1.java
@@ -0,0 +1,650 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import com.google.common.base.Optional;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
+import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.apache.hadoop.ozone.audit.AuditLogger;
+import org.apache.hadoop.ozone.audit.AuditMessage;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OMMetrics;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ResolvedBucket;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.OMClientResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateDirectoryRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.jetbrains.annotations.NotNull;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.when;
+
+/**
+ * Test OM directory create request V1 layout version.
+ *
+ * OzoneManager is mocked; a real OmMetadataManagerImpl (backed by a temp
+ * folder) is wired in so that table/cache state can be verified directly.
+ */
+public class TestOMDirectoryCreateRequestV1 {
+
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder();
+
+ private OzoneManager ozoneManager;
+ private OMMetrics omMetrics;
+ private OMMetadataManager omMetadataManager;
+ private AuditLogger auditLogger;
+ // Just setting ozoneManagerDoubleBuffer which does nothing.
+ private OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
+ ((response, transactionIndex) -> {
+ return null;
+ });
+
+ @Before
+ public void setup() throws Exception {
+ ozoneManager = Mockito.mock(OzoneManager.class);
+ omMetrics = OMMetrics.create();
+ OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+ ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+ folder.newFolder().getAbsolutePath());
+ // Use the declared constant instead of the magic literal "V1" so the
+ // test stays in sync with OMConfigKeys if the version string changes.
+ ozoneConfiguration.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
+ OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+ omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+ when(ozoneManager.getMetrics()).thenReturn(omMetrics);
+ when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
+ auditLogger = Mockito.mock(AuditLogger.class);
+ when(ozoneManager.getAuditLogger()).thenReturn(auditLogger);
+ Mockito.doNothing().when(auditLogger).logWrite(any(AuditMessage.class));
+ when(ozoneManager.resolveBucketLink(any(KeyArgs.class),
+ any(OMClientRequest.class)))
+ .thenReturn(new ResolvedBucket(Pair.of("", ""), Pair.of("", "")));
+ }
+
+ @After
+ public void stop() {
+ // Unregister metrics and clear mocks so tests don't leak into each other.
+ omMetrics.unRegister();
+ Mockito.framework().clearInlineMocks();
+ }
+
+ @Test
+ public void testPreExecute() throws Exception {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ String keyName = "a/b/c";
+
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+
+ OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+ keyName);
+ OMDirectoryCreateRequestV1 omDirectoryCreateRequestV1 =
+ new OMDirectoryCreateRequestV1(omRequest);
+
+ OMRequest modifiedOmRequest =
+ omDirectoryCreateRequestV1.preExecute(ozoneManager);
+
+ // As in preExecute, we modify original request.
+ Assert.assertNotEquals(omRequest, modifiedOmRequest);
+ }
+
+ @Test
+ public void testValidateAndUpdateCache() throws Exception {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ // Typed list (instead of raw List) to avoid unchecked warnings.
+ List<String> dirs = new ArrayList<>();
+ String keyName = createDirKey(dirs, 3);
+
+ // Add volume and bucket entries to DB.
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+ long bucketID = omBucketInfo.getObjectID();
+
+ OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+ keyName);
+ OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+ new OMDirectoryCreateRequestV1(omRequest);
+
+ OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+ omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+ OMClientResponse omClientResponse =
+ omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+ ozoneManagerDoubleBufferHelper);
+
+ // assertEquals reports both expected and actual status on failure,
+ // unlike assertTrue(a == b).
+ Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+ omClientResponse.getOMResponse().getStatus());
+ verifyDirectoriesInDB(dirs, bucketID);
+ }
+
+ @Test
+ public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ List<String> dirs = new ArrayList<>();
+ String keyName = createDirKey(dirs, 3);
+
+ // Note: neither the volume nor the bucket is created here.
+ OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+ keyName);
+ OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+ new OMDirectoryCreateRequestV1(omRequest);
+
+ OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+ omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+ OMClientResponse omClientResponse =
+ omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+ ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(VOLUME_NOT_FOUND,
+ omClientResponse.getOMResponse().getStatus());
+
+ // No directory entry should have been persisted for the failed request.
+ Assert.assertTrue("Unexpected directory entries!",
+ omMetadataManager.getDirectoryTable().isEmpty());
+ }
+
+ @Test
+ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ List<String> dirs = new ArrayList<>();
+ String keyName = createDirKey(dirs, 3);
+
+ OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+ keyName);
+ OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+ new OMDirectoryCreateRequestV1(omRequest);
+
+ OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+ omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+ // Only the volume exists; the bucket is intentionally missing.
+ TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
+
+ OMClientResponse omClientResponse =
+ omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+ ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
+ omClientResponse.getOMResponse().getStatus());
+
+ // No directory entry should have been persisted for the failed request.
+ Assert.assertTrue("Unexpected directory entries!",
+ omMetadataManager.getDirectoryTable().isEmpty());
+ }
+
+ @Test
+ public void testValidateAndUpdateCacheWithSubDirectoryInPath()
+ throws Exception {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ List<String> dirs = new ArrayList<>();
+ String keyName = createDirKey(dirs, 3);
+
+ // Add volume and bucket entries to DB.
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+ long bucketID = omBucketInfo.getObjectID();
+ int objID = 100;
+
+ //1. Create root
+ OmDirectoryInfo omDirInfo =
+ TestOMRequestUtils.createOmDirectoryInfo(dirs.get(0), objID++,
+ bucketID);
+ TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+ omMetadataManager);
+ //2. Create sub-directory under root
+ omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(dirs.get(1), objID++,
+ omDirInfo.getObjectID());
+ TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo, 5000,
+ omMetadataManager);
+
+ OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+ keyName);
+ OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+ new OMDirectoryCreateRequestV1(omRequest);
+
+ OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+ omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+ OMClientResponse omClientResponse =
+ omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+ ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+ omClientResponse.getOMResponse().getStatus());
+
+ // All path components (pre-existing and new) should exist in DB/cache.
+ verifyDirectoriesInDB(dirs, bucketID);
+ }
+
+ @Test
+ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists()
+ throws Exception {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ List<String> dirs = new ArrayList<>();
+ String keyName = createDirKey(dirs, 3);
+
+ // Add volume and bucket entries to DB.
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+ long bucketID = omBucketInfo.getObjectID();
+
+ // bucketID is the parent
+ long parentID = bucketID;
+
+ // add all the directories into DirectoryTable (DB only, no cache)
+ for (int indx = 0; indx < dirs.size(); indx++) {
+ long objID = 100 + indx;
+ long txnID = 5000 + indx;
+ // for index=0, parentID is bucketID
+ OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+ dirs.get(indx), objID, parentID);
+ TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+ txnID, omMetadataManager);
+
+ parentID = omDirInfo.getObjectID();
+ }
+
+ OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+ keyName);
+ OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+ new OMDirectoryCreateRequestV1(omRequest);
+
+ OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+ omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+ OMClientResponse omClientResponse =
+ omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+ ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(
+ OzoneManagerProtocolProtos.Status.DIRECTORY_ALREADY_EXISTS,
+ omClientResponse.getOMResponse().getStatus());
+
+ Assert.assertEquals("Wrong OM numKeys metrics",
+ 0, ozoneManager.getMetrics().getNumKeys());
+
+ // Entries should still exist in DB but must not be added to the cache,
+ // since the create request was rejected.
+ verifyDirectoriesInDB(dirs, bucketID);
+ verifyDirectoriesNotInCache(dirs, bucketID);
+ }
+
+ /**
+ * Case: File exists with the same name as the requested directory.
+ * Say, requested to createDir '/a/b/c' and there is a file exists with
+ * same name.
+ */
+ @Test
+ public void testValidateAndUpdateCacheWithFilesInPath() throws Exception {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ List<String> dirs = new ArrayList<>();
+ String keyName = createDirKey(dirs, 3);
+
+ // Add volume and bucket entries to DB.
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+ long parentID = omBucketInfo.getObjectID();
+
+ // add all the parent directories into DirectoryTable. This won't create
+ // the leaf node and this will be used in CreateDirectoryReq.
+ for (int indx = 0; indx < dirs.size() - 1; indx++) {
+ long objID = 100 + indx;
+ long txnID = 5000 + indx;
+ // for index=0, parentID is bucketID
+ OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+ dirs.get(indx), objID, parentID);
+ TestOMRequestUtils.addDirKeyToDirTable(false, omDirInfo,
+ txnID, omMetadataManager);
+
+ parentID = omDirInfo.getObjectID();
+ }
+
+ long objID = parentID + 100;
+ long txnID = 50000;
+
+ // Add a file into the FileTable, this is to simulate "file exists" check.
+ OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+ bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.THREE, objID++);
+ String ozoneFileName = parentID + "/" + dirs.get(dirs.size() - 1);
+ omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneFileName),
+ new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+ omMetadataManager.getKeyTable().put(ozoneFileName, omKeyInfo);
+
+ OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+ keyName);
+ OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+ new OMDirectoryCreateRequestV1(omRequest);
+
+ OMRequest modifiedOmRequest =
+ omDirCreateRequestV1.preExecute(ozoneManager);
+
+ omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+ OMClientResponse omClientResponse =
+ omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+ ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS,
+ omClientResponse.getOMResponse().getStatus());
+
+ Assert.assertEquals("Wrong OM numKeys metrics",
+ 0, ozoneManager.getMetrics().getNumKeys());
+
+ // The conflicting file entry must still exist in the KeyTable.
+ Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozoneFileName));
+ // Only the three parent directories (not the leaf) are in DirectoryTable.
+ Assert.assertEquals("Wrong directories count!", 3,
+ omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+ }
+
+
+ /**
+ * Case: File exists in the given path.
+ * Say, requested to createDir '/a/b/c/d' and there is a file '/a/b' exists
+ * in the given path.
+ */
+ @Test
+ public void testValidateAndUpdateCacheWithFileExistsInGivenPath()
+ throws Exception {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ List<String> dirs = new ArrayList<>();
+ String keyName = createDirKey(dirs, 3);
+
+ // Add volume and bucket entries to DB.
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+ long parentID = omBucketInfo.getObjectID();
+
+ long objID = parentID + 100;
+ long txnID = 5000;
+
+ // for index=0, parentID is bucketID
+ OmDirectoryInfo omDirInfo = TestOMRequestUtils.createOmDirectoryInfo(
+ dirs.get(0), objID++, parentID);
+ TestOMRequestUtils.addDirKeyToDirTable(true, omDirInfo,
+ txnID, omMetadataManager);
+ parentID = omDirInfo.getObjectID();
+
+ // Add a key in second level.
+ OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
+ bucketName, keyName, HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.THREE, objID++);
+ String ozoneKey = parentID + "/" + dirs.get(1);
+ omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(ozoneKey),
+ new CacheValue<>(Optional.of(omKeyInfo), ++txnID));
+ omMetadataManager.getKeyTable().put(ozoneKey, omKeyInfo);
+
+ OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+ keyName);
+ OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+ new OMDirectoryCreateRequestV1(omRequest);
+
+ OMRequest modifiedOmRequest =
+ omDirCreateRequestV1.preExecute(ozoneManager);
+
+ omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+ OMClientResponse omClientResponse =
+ omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+ ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS,
+ omClientResponse.getOMResponse().getStatus());
+
+ Assert.assertEquals("Wrong OM numKeys metrics",
+ 0, ozoneManager.getMetrics().getNumKeys());
+
+ // The conflicting file entry must still exist in the KeyTable.
+ Assert.assertNotNull(omMetadataManager.getKeyTable().get(ozoneKey));
+ // Only the pre-created first-level directory is in the DirectoryTable.
+ Assert.assertEquals("Wrong directories count!",
+ 1, omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+ }
+
+ @Test
+ public void testCreateDirectoryUptoLimitOfMaxLevels255() throws Exception {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ List<String> dirs = new ArrayList<>();
+ String keyName = createDirKey(dirs, 255);
+
+ // Add volume and bucket entries to DB.
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+ long bucketID = omBucketInfo.getObjectID();
+
+ OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+ OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+ OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+ new OMDirectoryCreateRequestV1(omRequest);
+
+ OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+ omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+ Assert.assertEquals(0L, omMetrics.getNumKeys());
+ OMClientResponse omClientResponse =
+ omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+ ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+ omClientResponse.getOMResponse().getStatus());
+
+ verifyDirectoriesInDB(dirs, bucketID);
+
+ // Every path component counts as one key in the numKeys metric.
+ Assert.assertEquals(dirs.size(), omMetrics.getNumKeys());
+ }
+
+ @Test
+ public void testCreateDirectoryExceedLimitOfMaxLevels255() throws Exception {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ List<String> dirs = new ArrayList<>();
+ String keyName = createDirKey(dirs, 256);
+
+ // Add volume and bucket entries to DB.
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+
+ OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+ OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+ OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+ new OMDirectoryCreateRequestV1(omRequest);
+
+ OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+ omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+ Assert.assertEquals(0L, omMetrics.getNumKeys());
+ OMClientResponse omClientResponse =
+ omDirCreateRequestV1.validateAndUpdateCache(ozoneManager,
+ 100L, ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(OzoneManagerProtocolProtos.Status.INVALID_KEY_NAME,
+ omClientResponse.getOMResponse().getStatus());
+
+ // The over-limit request must not create any directory entries.
+ Assert.assertEquals("Unexpected directories!", 0,
+ omMetadataManager.getDirectoryTable().getEstimatedKeyCount());
+
+ Assert.assertEquals(0, omMetrics.getNumKeys());
+ }
+
+ @Test
+ public void testCreateDirectoryOMMetric() throws Exception {
+ String volumeName = "vol1";
+ String bucketName = "bucket1";
+ List<String> dirs = new ArrayList<>();
+ String keyName = createDirKey(dirs, 3);
+
+ // Add volume and bucket entries to DB.
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+ long bucketID = omBucketInfo.getObjectID();
+
+ OMRequest omRequest = createDirectoryRequest(volumeName, bucketName,
+ OzoneFSUtils.addTrailingSlashIfNeeded(keyName));
+ OMDirectoryCreateRequestV1 omDirCreateRequestV1 =
+ new OMDirectoryCreateRequestV1(omRequest);
+
+ OMRequest modifiedOmRequest = omDirCreateRequestV1.preExecute(ozoneManager);
+
+ omDirCreateRequestV1 = new OMDirectoryCreateRequestV1(modifiedOmRequest);
+
+ Assert.assertEquals(0L, omMetrics.getNumKeys());
+ OMClientResponse omClientResponse =
+ omDirCreateRequestV1.validateAndUpdateCache(ozoneManager, 100L,
+ ozoneManagerDoubleBufferHelper);
+
+ Assert.assertEquals(OzoneManagerProtocolProtos.Status.OK,
+ omClientResponse.getOMResponse().getStatus());
+
+ verifyDirectoriesInDB(dirs, bucketID);
+
+ // numKeys should have grown by one per created path component.
+ Assert.assertEquals(dirs.size(), omMetrics.getNumKeys());
+ }
+
+
+ /**
+ * Builds a random directory path of the given depth, appending every path
+ * component to {@code dirs}.
+ *
+ * @param dirs output list receiving each path component in order
+ * @param depth number of sub-directories below the first component
+ * @return the full key name with components joined by '/'
+ */
+ @NotNull
+ private String createDirKey(List<String> dirs, int depth) {
+ String keyName = RandomStringUtils.randomAlphabetic(5);
+ dirs.add(keyName);
+ for (int i = 0; i < depth; i++) {
+ String dirName = RandomStringUtils.randomAlphabetic(5);
+ dirs.add(dirName);
+ keyName += "/" + dirName;
+ }
+ return keyName;
+ }
+
+ /**
+ * Asserts that every path component exists in the DirectoryTable, walking
+ * parent object ids down from the bucket.
+ */
+ private void verifyDirectoriesInDB(List<String> dirs, long bucketID)
+ throws IOException {
+ // bucketID is the parent of the first-level directory.
+ long parentID = bucketID;
+ for (String dirName : dirs) {
+ String dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+ OmDirectoryInfo omDirInfo =
+ omMetadataManager.getDirectoryTable().get(dbKey);
+ Assert.assertNotNull("Invalid directory!", omDirInfo);
+ Assert.assertEquals("Invalid directory!", dirName, omDirInfo.getName());
+ Assert.assertEquals("Invalid dir path!",
+ parentID + "/" + dirName, omDirInfo.getPath());
+ parentID = omDirInfo.getObjectID();
+ }
+ }
+
+ /**
+ * Asserts that none of the path components are present in the
+ * DirectoryTable cache.
+ */
+ private void verifyDirectoriesNotInCache(List<String> dirs, long bucketID)
+ throws IOException {
+ // bucketID is the parent of the first-level directory.
+ long parentID = bucketID;
+ for (String dirName : dirs) {
+ String dbKey = omMetadataManager.getOzonePathKey(parentID, dirName);
+ CacheValue<OmDirectoryInfo> omDirInfoCacheValue =
+ omMetadataManager.getDirectoryTable()
+ .getCacheValue(new CacheKey<>(dbKey));
+ Assert.assertNull("Unexpected directory!", omDirInfoCacheValue);
+ // Bug fix: advance parentID via the persisted entry so that deeper
+ // levels are checked against their real parent id, not the bucket id
+ // (the previous version checked non-existent keys beyond level 0).
+ OmDirectoryInfo omDirInfo =
+ omMetadataManager.getDirectoryTable().get(dbKey);
+ if (omDirInfo != null) {
+ parentID = omDirInfo.getObjectID();
+ }
+ }
+ }
+
+ /**
+ * Create OMRequest which encapsulates CreateDirectory request.
+ *
+ * @param volumeName volume name
+ * @param bucketName bucket name
+ * @param keyName directory key name
+ * @return OMRequest
+ */
+ private OMRequest createDirectoryRequest(String volumeName, String bucketName,
+ String keyName) {
+ return OMRequest.newBuilder().setCreateDirectoryRequest(
+ CreateDirectoryRequest.newBuilder().setKeyArgs(
+ KeyArgs.newBuilder().setVolumeName(volumeName)
+ .setBucketName(bucketName).setKeyName(keyName)))
+ .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
+ .setClientId(UUID.randomUUID().toString()).build();
+ }
+
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
index c7aa6be9aa3..5010d0a7e3f 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java
@@ -21,6 +21,7 @@
import java.util.List;
import java.util.UUID;
+import org.jetbrains.annotations.NotNull;
import org.junit.Assert;
import org.junit.Test;
@@ -55,8 +56,7 @@ public void testPreExecute() throws Exception{
HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
false, false);
- OMFileCreateRequest omFileCreateRequest =
- new OMFileCreateRequest(omRequest);
+ OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
Assert.assertNotEquals(omRequest, modifiedOmRequest);
@@ -96,8 +96,7 @@ public void testPreExecuteWithBlankKey() throws Exception{
HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
false, false);
- OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
- omRequest);
+ OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
Assert.assertNotEquals(omRequest, modifiedOmRequest);
@@ -121,21 +120,17 @@ public void testValidateAndUpdateCache() throws Exception {
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
omMetadataManager);
- OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
- omRequest);
+ OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
long id = modifiedOmRequest.getCreateFileRequest().getClientID();
- String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
- keyName, id);
-
// Before calling
- OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+ OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(keyName, id, false);
Assert.assertNull(omKeyInfo);
- omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+ omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
OMClientResponse omFileCreateResponse =
omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -146,8 +141,7 @@ public void testValidateAndUpdateCache() throws Exception {
// Check open table whether key is added or not.
- omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
- Assert.assertNotNull(omKeyInfo);
+ omKeyInfo = verifyPathInOpenKeyTable(keyName, id, true);
List< OmKeyLocationInfo > omKeyLocationInfoList =
omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -179,12 +173,11 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
false, true);
- OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
- omRequest);
+ OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
- omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+ omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
OMClientResponse omFileCreateResponse =
omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -200,13 +193,11 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
false, true);
TestOMRequestUtils.addVolumeToDB(volumeName, omMetadataManager);
- OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
- omRequest);
+ OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
- omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
-
+ omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
OMClientResponse omFileCreateResponse =
omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -311,8 +302,7 @@ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
testNonRecursivePath(key, false, false, true);
}
-
- private void testNonRecursivePath(String key,
+ protected void testNonRecursivePath(String key,
boolean overWrite, boolean recursive, boolean fail) throws Exception {
OMRequest omRequest = createFileRequest(volumeName, bucketName, key,
HddsProtos.ReplicationFactor.ONE, HddsProtos.ReplicationType.RATIS,
@@ -320,12 +310,11 @@ private void testNonRecursivePath(String key,
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
omMetadataManager);
- OMFileCreateRequest omFileCreateRequest = new OMFileCreateRequest(
- omRequest);
+ OMFileCreateRequest omFileCreateRequest = getOMFileCreateRequest(omRequest);
OMRequest modifiedOmRequest = omFileCreateRequest.preExecute(ozoneManager);
- omFileCreateRequest = new OMFileCreateRequest(modifiedOmRequest);
+ omFileCreateRequest = getOMFileCreateRequest(modifiedOmRequest);
OMClientResponse omFileCreateResponse =
omFileCreateRequest.validateAndUpdateCache(ozoneManager, 100L,
@@ -341,10 +330,9 @@ private void testNonRecursivePath(String key,
Assert.assertTrue(omFileCreateResponse.getOMResponse().getSuccess());
long id = modifiedOmRequest.getCreateFileRequest().getClientID();
- String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
- key, id);
- OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
- Assert.assertNotNull(omKeyInfo);
+ verifyKeyNameInCreateFileResponse(key, omFileCreateResponse);
+
+ OmKeyInfo omKeyInfo = verifyPathInOpenKeyTable(key, id, true);
List< OmKeyLocationInfo > omKeyLocationInfoList =
omKeyInfo.getLatestVersionLocations().getLocationList();
@@ -368,6 +356,14 @@ private void testNonRecursivePath(String key,
}
}
+ private void verifyKeyNameInCreateFileResponse(String key,
+ OMClientResponse omFileCreateResponse) {
+ OzoneManagerProtocolProtos.CreateFileResponse createFileResponse =
+ omFileCreateResponse.getOMResponse().getCreateFileResponse();
+ String actualFileName = createFileResponse.getKeyInfo().getKeyName();
+ Assert.assertEquals("Incorrect keyName", key, actualFileName);
+ }
+
/**
* Create OMRequest which encapsulates OMFileCreateRequest.
* @param volumeName
@@ -377,7 +373,8 @@ private void testNonRecursivePath(String key,
* @param replicationType
* @return OMRequest
*/
- private OMRequest createFileRequest(
+ @NotNull
+ protected OMRequest createFileRequest(
String volumeName, String bucketName, String keyName,
HddsProtos.ReplicationFactor replicationFactor,
HddsProtos.ReplicationType replicationType, boolean overWrite,
@@ -399,4 +396,38 @@ private OMRequest createFileRequest(
.setCreateFileRequest(createFileRequest).build();
}
+
+ /**
+ * Verify path in open key table. Also, it returns OMKeyInfo for the given
+ * key path.
+ *
+ * @param key key name
+ * @param id client id
+ * @param doAssert if true then do assertion, otherwise it just skip.
+ * @return om key info for the given key path.
+ * @throws Exception DB failure
+ */
+ protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+ boolean doAssert)
+ throws Exception {
+ String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
+ key, id);
+ OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey);
+ if (doAssert) {
+ Assert.assertNotNull("Failed to find key in OpenKeyTable", omKeyInfo);
+ }
+ return omKeyInfo;
+ }
+
+ /**
+ * Gets OMFileCreateRequest reference.
+ *
+ * @param omRequest om request
+ * @return OMFileCreateRequest reference
+ */
+ @NotNull
+ protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest){
+ return new OMFileCreateRequest(omRequest);
+ }
+
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
new file mode 100644
index 00000000000..7ded386fdd8
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java
@@ -0,0 +1,192 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.request.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.UUID;
+
+/**
+ * Tests OMFileCreateRequest layout version V1.
+ */
+public class TestOMFileCreateRequestV1 extends TestOMFileCreateRequest {
+
+ @Test
+ public void testValidateAndUpdateCacheWithNonRecursive() throws Exception {
+ testNonRecursivePath(UUID.randomUUID().toString(), false, false, false);
+ testNonRecursivePath("a/b", false, false, true);
+ Assert.assertEquals("Invalid metrics value", 0, omMetrics.getNumKeys());
+
+ // Create parent dirs for the path
+ TestOMRequestUtils.addParentsToDirTable(volumeName, bucketName,
+ "a/b/c", omMetadataManager);
+ String fileNameD = "d";
+ TestOMRequestUtils.addKeyToTable(false, volumeName, bucketName,
+ "a/b/c/" + fileNameD, 0L, HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.ONE, omMetadataManager);
+
+ // cannot create file if directory of same name exists
+ testNonRecursivePath("a/b/c", false, false, true);
+
+ // Delete child key but retain path "a/b/" in the key table
+ OmDirectoryInfo dirPathC = getDirInfo("a/b/c");
+ Assert.assertNotNull("Failed to find dir path: a/b/c", dirPathC);
+ String dbFileD = omMetadataManager.getOzonePathKey(
+ dirPathC.getObjectID(), fileNameD);
+ omMetadataManager.getKeyTable().delete(dbFileD);
+ omMetadataManager.getKeyTable().delete(dirPathC.getPath());
+
+ // can create non-recursive because parents already exist.
+ testNonRecursivePath("a/b/e", false, false, false);
+ }
+
+ @Test
+ public void testValidateAndUpdateCacheWithRecursiveAndOverWrite()
+ throws Exception {
+ String key = "c/d/e/f";
+ // Should be able to create the file even if the parent directories do not exist
+ testNonRecursivePath(key, false, true, false);
+ Assert.assertEquals("Invalid metrics value", 3, omMetrics.getNumKeys());
+
+ // Add the key to key table
+ OmDirectoryInfo omDirInfo = getDirInfo("c/d/e");
+ OmKeyInfo omKeyInfo =
+ TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+ HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.ONE,
+ omDirInfo.getObjectID() + 10,
+ omDirInfo.getObjectID(), 100, Time.now());
+ TestOMRequestUtils.addFileToKeyTable(false, false,
+ "f", omKeyInfo, -1,
+ omDirInfo.getObjectID() + 10, omMetadataManager);
+
+ // Even if key exists, should be able to create file as overwrite is set
+ // to true
+ testNonRecursivePath(key, true, true, false);
+ testNonRecursivePath(key, false, true, true);
+ }
+
+ @Test
+ public void testValidateAndUpdateCacheWithNonRecursiveAndOverWrite()
+ throws Exception {
+ String parentDir = "c/d/e";
+ String fileName = "f";
+ String key = parentDir + "/" + fileName;
+ TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
+ omMetadataManager);
+ // Create parent dirs for the path
+ long parentId = TestOMRequestUtils.addParentsToDirTable(volumeName,
+ bucketName, parentDir, omMetadataManager);
+
+ // Create a file under the already existing parent path "c/d/e". In
+ // non-recursive mode the parent directories must already exist.
+ testNonRecursivePath(key, false, false, false);
+
+ OmKeyInfo omKeyInfo =
+ TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, key,
+ HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.ONE,
+ parentId + 1,
+ parentId, 100, Time.now());
+ TestOMRequestUtils.addFileToKeyTable(false, false,
+ fileName, omKeyInfo, -1, 50, omMetadataManager);
+
+ // Even if key exists in KeyTable, should be able to create file as
+ // overwrite is set to true
+ testNonRecursivePath(key, true, false, false);
+ testNonRecursivePath(key, false, false, true);
+ }
+
+ protected OmKeyInfo verifyPathInOpenKeyTable(String key, long id,
+ boolean doAssert)
+ throws Exception {
+ long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+ omMetadataManager);
+ String[] pathComponents = StringUtils.split(key, '/');
+ long parentId = bucketId;
+ for (int indx = 0; indx < pathComponents.length; indx++) {
+ String pathElement = pathComponents[indx];
+ // Reached last component, which is file name
+ if (indx == pathComponents.length - 1) {
+ String dbOpenFileName = omMetadataManager.getOpenFileName(
+ parentId, pathElement, id);
+ OmKeyInfo omKeyInfo = omMetadataManager.getOpenKeyTable()
+ .get(dbOpenFileName);
+ if (doAssert) {
+ Assert.assertNotNull("Invalid key!", omKeyInfo);
+ }
+ return omKeyInfo;
+ } else {
+ // directory
+ String dbKey = omMetadataManager.getOzonePathKey(parentId,
+ pathElement);
+ OmDirectoryInfo dirInfo =
+ omMetadataManager.getDirectoryTable().get(dbKey);
+ parentId = dirInfo.getObjectID();
+ }
+ }
+ if (doAssert) {
+ Assert.fail("Invalid key!");
+ }
+ return null;
+ }
+
+ private OmDirectoryInfo getDirInfo(String key)
+ throws Exception {
+ long bucketId = TestOMRequestUtils.getBucketId(volumeName, bucketName,
+ omMetadataManager);
+ String[] pathComponents = StringUtils.split(key, '/');
+ long parentId = bucketId;
+ OmDirectoryInfo dirInfo = null;
+ for (int indx = 0; indx < pathComponents.length; indx++) {
+ String pathElement = pathComponents[indx];
+ // Every component of this path is a directory; look up each one
+ // in the directory table to walk down to the requested directory.
+ String dbKey = omMetadataManager.getOzonePathKey(parentId,
+ pathElement);
+ dirInfo =
+ omMetadataManager.getDirectoryTable().get(dbKey);
+ parentId = dirInfo.getObjectID();
+ }
+ return dirInfo;
+ }
+
+ @NotNull
+ @Override
+ protected OzoneConfiguration getOzoneConfiguration() {
+ OzoneConfiguration config = super.getOzoneConfiguration();
+ config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+ return config;
+ }
+
+ protected OMFileCreateRequest getOMFileCreateRequest(OMRequest omRequest) {
+ return new OMFileCreateRequestV1(omRequest);
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
index b327b76e513..09d499e3c9e 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequest.java
@@ -19,12 +19,15 @@
package org.apache.hadoop.ozone.om.request.key;
+import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.stream.Collectors;
import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.jetbrains.annotations.NotNull;
import org.junit.Assert;
import org.junit.Test;
@@ -48,6 +51,8 @@
*/
public class TestOMKeyCommitRequest extends TestOMKeyRequest {
+ private String parentDir;
+
@Test
public void testPreExecute() throws Exception {
doPreExecute(createCommitKeyRequest());
@@ -56,20 +61,15 @@ public void testPreExecute() throws Exception {
@Test
public void testValidateAndUpdateCache() throws Exception {
- OMRequest modifiedOmRequest =
- doPreExecute(createCommitKeyRequest());
+ OMRequest modifiedOmRequest = doPreExecute(createCommitKeyRequest());
OMKeyCommitRequest omKeyCommitRequest =
- new OMKeyCommitRequest(modifiedOmRequest);
+ getOmKeyCommitRequest(modifiedOmRequest);
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
omMetadataManager);
- TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
- clientID, replicationType, replicationFactor, omMetadataManager);
-
- String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
- keyName);
+ String ozoneKey = addKeyToOpenKeyTable();
// Key should not be there in key table, as validateAndUpdateCache is
// still not called.
@@ -92,6 +92,8 @@ public void testValidateAndUpdateCache() throws Exception {
omKeyInfo = omMetadataManager.getKeyTable().get(ozoneKey);
Assert.assertNotNull(omKeyInfo);
+ // Verify the keyName/fileName format persisted in the DB keyInfo
+ verifyKeyName(omKeyInfo);
// Check modification time
@@ -107,7 +109,14 @@ public void testValidateAndUpdateCache() throws Exception {
Assert.assertEquals(locationInfoListFromCommitKeyRequest,
omKeyInfo.getLatestVersionLocations().getLocationList());
+ }
+ @Test
+ public void testValidateAndUpdateCacheWithSubDirs() throws Exception {
+ parentDir = "dir1/dir2/dir3/";
+ keyName = parentDir + UUID.randomUUID().toString();
+
+ testValidateAndUpdateCache();
}
@Test
@@ -117,10 +126,9 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception {
doPreExecute(createCommitKeyRequest());
OMKeyCommitRequest omKeyCommitRequest =
- new OMKeyCommitRequest(modifiedOmRequest);
+ getOmKeyCommitRequest(modifiedOmRequest);
- String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
- keyName);
+ String ozoneKey = getOzonePathKey();
// Key should not be there in key table, as validateAndUpdateCache is
// still not called.
@@ -147,13 +155,11 @@ public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception {
doPreExecute(createCommitKeyRequest());
OMKeyCommitRequest omKeyCommitRequest =
- new OMKeyCommitRequest(modifiedOmRequest);
-
+ getOmKeyCommitRequest(modifiedOmRequest);
TestOMRequestUtils.addVolumeToDB(volumeName, OzoneConsts.OZONE,
omMetadataManager);
- String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
- keyName);
+ String ozoneKey = getOzonePathKey();
// Key should not be there in key table, as validateAndUpdateCache is
// still not called.
@@ -180,14 +186,12 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception {
doPreExecute(createCommitKeyRequest());
OMKeyCommitRequest omKeyCommitRequest =
- new OMKeyCommitRequest(modifiedOmRequest);
-
+ getOmKeyCommitRequest(modifiedOmRequest);
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
omMetadataManager);
- String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
- keyName);
+ String ozoneKey = getOzonePathKey();
// Key should not be there in key table, as validateAndUpdateCache is
// still not called.
@@ -216,7 +220,7 @@ public void testValidateAndUpdateCacheWithKeyNotFound() throws Exception {
private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception {
OMKeyCommitRequest omKeyCommitRequest =
- new OMKeyCommitRequest(originalOMRequest);
+ getOmKeyCommitRequest(originalOMRequest);
OMRequest modifiedOmRequest = omKeyCommitRequest.preExecute(ozoneManager);
@@ -294,4 +298,34 @@ private List getKeyLocation() {
return keyLocations;
}
+ protected String getParentDir() {
+ return parentDir;
+ }
+
+ @NotNull
+ protected String getOzonePathKey() throws IOException {
+ return omMetadataManager.getOzoneKey(volumeName, bucketName,
+ keyName);
+ }
+
+ @NotNull
+ protected String addKeyToOpenKeyTable() throws Exception {
+ TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
+ clientID, replicationType, replicationFactor, omMetadataManager);
+
+ return getOzonePathKey();
+ }
+
+ @NotNull
+ protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+ return new OMKeyCommitRequest(omRequest);
+ }
+
+ protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+ Assert.assertEquals("Incorrect KeyName", keyName,
+ omKeyInfo.getKeyName());
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ Assert.assertEquals("Incorrect FileName", fileName,
+ omKeyInfo.getFileName());
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
new file mode 100644
index 00000000000..f5168e1a3f0
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+package org.apache.hadoop.ozone.om.request.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+import java.io.IOException;
+
+/**
+ * Tests OMKeyCommitRequestV1 with layout version V1.
+ */
+public class TestOMKeyCommitRequestV1 extends TestOMKeyCommitRequest {
+
+ private long parentID = Long.MIN_VALUE;
+
+ private long getBucketID() throws java.io.IOException {
+ String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName);
+ OmBucketInfo omBucketInfo =
+ omMetadataManager.getBucketTable().get(bucketKey);
+ if(omBucketInfo!= null){
+ return omBucketInfo.getObjectID();
+ }
+ // bucket doesn't exist in DB
+ return Long.MIN_VALUE;
+ }
+
+ @Override
+ protected String getOzonePathKey() throws IOException {
+ long bucketID = getBucketID();
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ return omMetadataManager.getOzonePathKey(bucketID, fileName);
+ }
+
+ @Override
+ protected String addKeyToOpenKeyTable() throws Exception {
+ // need to initialize parentID
+ if (getParentDir() == null) {
+ parentID = getBucketID();
+ } else {
+ parentID = TestOMRequestUtils.addParentsToDirTable(volumeName,
+ bucketName, getParentDir(), omMetadataManager);
+ }
+ long objectId = 100;
+
+ OmKeyInfo omKeyInfoV1 =
+ TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+ HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+ Time.now());
+
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ TestOMRequestUtils.addFileToKeyTable(true, false,
+ fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+
+ return omMetadataManager.getOzonePathKey(parentID, fileName);
+ }
+
+ @NotNull
+ @Override
+ protected OzoneConfiguration getOzoneConfiguration() {
+ OzoneConfiguration config = super.getOzoneConfiguration();
+ config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+ return config;
+ }
+
+ @NotNull
+ protected OMKeyCommitRequest getOmKeyCommitRequest(OMRequest omRequest) {
+ return new OMKeyCommitRequestV1(omRequest);
+ }
+
+ protected void verifyKeyName(OmKeyInfo omKeyInfo) {
+ // V1 format - stores fileName in the keyName DB field.
+ String fileName = OzoneFSUtils.getFileName(keyName);
+ Assert.assertEquals("Incorrect FileName", fileName,
+ omKeyInfo.getFileName());
+ Assert.assertEquals("Incorrect KeyName", fileName,
+ omKeyInfo.getKeyName());
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
index 116ba5ce8fb..5f15c504776 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs;
+import org.jetbrains.annotations.NotNull;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
@@ -89,6 +90,7 @@ public class TestOMKeyRequest {
protected long clientID;
protected long scmBlockSize = 1000L;
protected long dataSize;
+ protected long txnLogId = 100000L;
// Just setting ozoneManagerDoubleBuffer which does nothing.
protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper =
@@ -101,7 +103,7 @@ public class TestOMKeyRequest {
public void setup() throws Exception {
ozoneManager = Mockito.mock(OzoneManager.class);
omMetrics = OMMetrics.create();
- OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+ OzoneConfiguration ozoneConfiguration = getOzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
@@ -169,6 +171,11 @@ public void setup() throws Exception {
.thenReturn(new ResolvedBucket(volumeAndBucket, volumeAndBucket));
}
+ @NotNull
+ protected OzoneConfiguration getOzoneConfiguration() {
+ return new OzoneConfiguration();
+ }
+
@After
public void stop() {
omMetrics.unRegister();
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
new file mode 100644
index 00000000000..0a1114a8ff3
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMDirectoryCreateResponseV1.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.db.BatchOperation;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestV1;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.UUID;
+
+/**
+ * Tests OMDirectoryCreateResponseV1 new layout version.
+ */
+public class TestOMDirectoryCreateResponseV1 {
+ @Rule
+ public TemporaryFolder folder = new TemporaryFolder();
+
+ private OMMetadataManager omMetadataManager;
+ private BatchOperation batchOperation;
+
+ @Before
+ public void setup() throws Exception {
+ OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+ ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+ folder.newFolder().getAbsolutePath());
+ omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+ batchOperation = omMetadataManager.getStore().initBatchOperation();
+ }
+
+ @Test
+ public void testAddToDBBatch() throws Exception {
+
+ String volumeName = UUID.randomUUID().toString();
+ String keyName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+
+ long parentID = 100;
+ OmDirectoryInfo omDirInfo =
+ TestOMRequestUtils.createOmDirectoryInfo(keyName, 500, parentID);
+
+ OMResponse omResponse = OMResponse.newBuilder().setCreateDirectoryResponse(
+ OzoneManagerProtocolProtos.CreateDirectoryResponse.getDefaultInstance())
+ .setStatus(OzoneManagerProtocolProtos.Status.OK)
+ .setCmdType(OzoneManagerProtocolProtos.Type.CreateDirectory)
+ .build();
+
+ OMDirectoryCreateResponseV1 omDirectoryCreateResponseV1 =
+ new OMDirectoryCreateResponseV1(omResponse, omDirInfo,
+ new ArrayList<>(), OMDirectoryCreateRequestV1.Result.SUCCESS);
+
+ omDirectoryCreateResponseV1.addToDBBatch(omMetadataManager, batchOperation);
+
+ // Do manual commit and see whether addToBatch is successful or not.
+ omMetadataManager.getStore().commitBatchOperation(batchOperation);
+
+ Assert.assertNotNull(omMetadataManager.getDirectoryTable().get(
+ omMetadataManager.getOzonePathKey(parentID, keyName)));
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
new file mode 100644
index 00000000000..d2ab4658242
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/file/TestOMFileCreateResponseV1.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.file;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.om.response.key.OMKeyCreateResponse;
+import org.apache.hadoop.ozone.om.response.key.TestOMKeyCreateResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMFileCreateResponseV1 with layout version V1.
+ */
+public class TestOMFileCreateResponseV1 extends TestOMKeyCreateResponse {
+
+ @NotNull
+ @Override
+ protected OmKeyInfo getOmKeyInfo() {
+ Assert.assertNotNull(omBucketInfo);
+ return TestOMRequestUtils.createOmKeyInfo(volumeName,
+ omBucketInfo.getBucketName(), keyName, replicationType,
+ replicationFactor,
+ omBucketInfo.getObjectID() + 1,
+ omBucketInfo.getObjectID(), 100, Time.now());
+ }
+
+ @NotNull
+ @Override
+ protected String getOpenKeyName() {
+ Assert.assertNotNull(omBucketInfo);
+ return omMetadataManager.getOpenFileName(
+ omBucketInfo.getObjectID(), keyName, clientID);
+ }
+
+ @NotNull
+ @Override
+ protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+ OmVolumeArgs volumeArgs, OmBucketInfo bucketInfo, OMResponse response) {
+
+ return new OMFileCreateResponseV1(response, keyInfo, null, clientID,
+ volumeArgs, bucketInfo);
+ }
+
+ @NotNull
+ @Override
+ protected OzoneConfiguration getOzoneConfiguration() {
+ OzoneConfiguration config = super.getOzoneConfiguration();
+ config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION, "V1");
+ return config;
+ }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
index ab425f2f60d..a8b3147d47b 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java
@@ -21,6 +21,7 @@
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
import org.junit.Assert;
import org.junit.Test;
@@ -31,20 +32,23 @@
/**
* Tests OMKeyCommitResponse.
*/
+@SuppressWarnings("visibilitymodifier")
public class TestOMKeyCommitResponse extends TestOMKeyResponse {
+ protected OmBucketInfo omBucketInfo;
+
@Test
public void testAddToDBBatch() throws Exception {
- OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
- bucketName, keyName, replicationType, replicationFactor);
OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
.setOwnerName(keyName).setAdminName(keyName)
.setVolume(volumeName).setCreationTime(Time.now()).build();
- OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+ omBucketInfo = OmBucketInfo.newBuilder()
.setVolumeName(volumeName).setBucketName(bucketName)
.setCreationTime(Time.now()).build();
+ OmKeyInfo omKeyInfo = getOmKeyInfo();
+
OzoneManagerProtocolProtos.OMResponse omResponse =
OzoneManagerProtocolProtos.OMResponse.newBuilder().setCommitKeyResponse(
OzoneManagerProtocolProtos.CommitKeyResponse.getDefaultInstance())
@@ -54,17 +58,14 @@ public void testAddToDBBatch() throws Exception {
// As during commit Key, entry will be already there in openKeyTable.
// Adding it here.
- TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
- clientID, replicationType, replicationFactor, omMetadataManager);
+ addKeyToOpenKeyTable();
- String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
- keyName, clientID);
+ String openKey = getOpenKeyName();
Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
- String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
- keyName);
- OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
- omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs, omBucketInfo);
+ String ozoneKey = getOzoneKey();
+ OMKeyCommitResponse omKeyCommitResponse = getOmKeyCommitResponse(
+ omVolumeArgs, omKeyInfo, omResponse, openKey, ozoneKey);
omKeyCommitResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -73,8 +74,7 @@ public void testAddToDBBatch() throws Exception {
// When key commit key is deleted from openKey table and added to keyTable.
Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
- Assert.assertTrue(omMetadataManager.getKeyTable().isExist(
- omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+ Assert.assertTrue(omMetadataManager.getKeyTable().isExist(ozoneKey));
}
@Test
@@ -85,7 +85,7 @@ public void testAddToDBBatchNoOp() throws Exception {
OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
.setOwnerName(keyName).setAdminName(keyName)
.setVolume(volumeName).setCreationTime(Time.now()).build();
- OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+ omBucketInfo = OmBucketInfo.newBuilder()
.setVolumeName(volumeName).setBucketName(bucketName)
.setCreationTime(Time.now()).build();
@@ -96,18 +96,15 @@ public void testAddToDBBatchNoOp() throws Exception {
.setCmdType(OzoneManagerProtocolProtos.Type.CommitKey)
.build();
- String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
- keyName, clientID);
- String ozoneKey = omMetadataManager.getOzoneKey(volumeName, bucketName,
- keyName);
+ String openKey = getOpenKeyName();
+ String ozoneKey = getOzoneKey();
- OMKeyCommitResponse omKeyCommitResponse = new OMKeyCommitResponse(
- omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs, omBucketInfo);
+ OMKeyCommitResponse omKeyCommitResponse = getOmKeyCommitResponse(
+ omVolumeArgs, omKeyInfo, omResponse, openKey, ozoneKey);
// As during commit Key, entry will be already there in openKeyTable.
// Adding it here.
- TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
- clientID, replicationType, replicationFactor, omMetadataManager);
+ addKeyToOpenKeyTable();
Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
@@ -120,7 +117,30 @@ public void testAddToDBBatchNoOp() throws Exception {
// As omResponse is error it is a no-op. So, entry should still be in
// openKey table.
Assert.assertTrue(omMetadataManager.getOpenKeyTable().isExist(openKey));
- Assert.assertFalse(omMetadataManager.getKeyTable().isExist(
- omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)));
+ Assert.assertFalse(omMetadataManager.getKeyTable().isExist(ozoneKey));
+ }
+
+  // Seeds the open key table, mimicking the state left by a key create.
+  protected void addKeyToOpenKeyTable() throws Exception {
+    TestOMRequestUtils.addKeyToTable(true, volumeName, bucketName, keyName,
+        clientID, replicationType, replicationFactor, omMetadataManager);
+  }
+
+ @NotNull
+ protected String getOzoneKey() {
+ Assert.assertNotNull(omBucketInfo);
+ return omMetadataManager.getOzoneKey(volumeName,
+ omBucketInfo.getBucketName(), keyName);
+ }
+
+ @NotNull
+ protected OMKeyCommitResponse getOmKeyCommitResponse(
+ OmVolumeArgs omVolumeArgs, OmKeyInfo omKeyInfo,
+ OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
+ String ozoneKey) {
+ Assert.assertNotNull(omBucketInfo);
+ return new OMKeyCommitResponse(
+ omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
+ omBucketInfo);
}
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
new file mode 100644
index 00000000000..369faa96b0c
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponseV1.java
@@ -0,0 +1,101 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.response.key;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
+import org.junit.Assert;
+
+/**
+ * Tests OMKeyCommitResponse layout version V1.
+ */
+public class TestOMKeyCommitResponseV1 extends TestOMKeyCommitResponse {
+
+  @Override
+  protected OMKeyCommitResponse getOmKeyCommitResponse(
+      OmVolumeArgs omVolumeArgs, OmKeyInfo omKeyInfo,
+      OzoneManagerProtocolProtos.OMResponse omResponse, String openKey,
+      String ozoneKey) {
+    Assert.assertNotNull(omBucketInfo);
+    return new OMKeyCommitResponseV1(
+        omResponse, omKeyInfo, ozoneKey, openKey, omVolumeArgs,
+        omBucketInfo);
+  }
+
+ @NotNull
+ @Override
+ protected OmKeyInfo getOmKeyInfo() {
+ Assert.assertNotNull(omBucketInfo);
+ return TestOMRequestUtils.createOmKeyInfo(volumeName,
+ omBucketInfo.getBucketName(), keyName, replicationType,
+ replicationFactor,
+ omBucketInfo.getObjectID() + 1,
+ omBucketInfo.getObjectID(), 100, Time.now());
+ }
+
+  // Seeds the key via the V1 file-table path (file name + parent object id).
+  @Override
+  protected void addKeyToOpenKeyTable() throws Exception {
+    Assert.assertNotNull(omBucketInfo);
+    long parentID = omBucketInfo.getObjectID();
+    long objectId = parentID + 10;
+
+    OmKeyInfo omKeyInfoV1 =
+        TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+            HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.ONE, objectId, parentID, 100,
+            Time.now());
+
+    String fileName = OzoneFSUtils.getFileName(keyName);
+    TestOMRequestUtils.addFileToKeyTable(true, false,
+        fileName, omKeyInfoV1, clientID, txnLogId, omMetadataManager);
+  }
+
+ @NotNull
+ @Override
+ protected String getOpenKeyName() {
+ Assert.assertNotNull(omBucketInfo);
+ return omMetadataManager.getOpenFileName(
+ omBucketInfo.getObjectID(), keyName, clientID);
+ }
+
+ @NotNull
+ @Override
+ protected String getOzoneKey() {
+ Assert.assertNotNull(omBucketInfo);
+ return omMetadataManager.getOzonePathKey(omBucketInfo.getObjectID(),
+ keyName);
+ }
+
+  @Override
+  protected OzoneConfiguration getOzoneConfiguration() {
+    OzoneConfiguration config = super.getOzoneConfiguration();
+    config.set(OMConfigKeys.OZONE_OM_LAYOUT_VERSION,
+        OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1);
+    return config;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
index 6357000f7cf..8b2dc8a0c52 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java
@@ -21,11 +21,11 @@
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.util.Time;
+import org.jetbrains.annotations.NotNull;
import org.junit.Assert;
import org.junit.Test;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
-import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.CreateKeyResponse;
@@ -41,16 +41,15 @@ public class TestOMKeyCreateResponse extends TestOMKeyResponse {
@Test
public void testAddToDBBatch() throws Exception {
- OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
- bucketName, keyName, replicationType, replicationFactor);
-
OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
.setOwnerName(keyName).setAdminName(keyName)
.setVolume(volumeName).setCreationTime(Time.now()).build();
- OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+ omBucketInfo = OmBucketInfo.newBuilder()
.setVolumeName(volumeName).setBucketName(bucketName)
.setCreationTime(Time.now()).build();
+ OmKeyInfo omKeyInfo = getOmKeyInfo();
+
OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
CreateKeyResponse.getDefaultInstance())
.setStatus(OzoneManagerProtocolProtos.Status.OK)
@@ -58,11 +57,11 @@ public void testAddToDBBatch() throws Exception {
.build();
OMKeyCreateResponse omKeyCreateResponse =
- new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
- omVolumeArgs, omBucketInfo);
+ getOmKeyCreateResponse(omKeyInfo, omVolumeArgs, omBucketInfo,
+ omResponse);
+
+ String openKey = getOpenKeyName();
- String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
- keyName, clientID);
Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
omKeyCreateResponse.addToDBBatch(omMetadataManager, batchOperation);
@@ -74,16 +73,16 @@ public void testAddToDBBatch() throws Exception {
@Test
public void testAddToDBBatchWithErrorResponse() throws Exception {
- OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName,
- bucketName, keyName, replicationType, replicationFactor);
OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder()
.setOwnerName(keyName).setAdminName(keyName)
.setVolume(volumeName).setCreationTime(Time.now()).build();
- OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder()
+ omBucketInfo = OmBucketInfo.newBuilder()
.setVolumeName(volumeName).setBucketName(bucketName)
.setCreationTime(Time.now()).build();
+ OmKeyInfo omKeyInfo = getOmKeyInfo();
+
OMResponse omResponse = OMResponse.newBuilder().setCreateKeyResponse(
CreateKeyResponse.getDefaultInstance())
.setStatus(OzoneManagerProtocolProtos.Status.KEY_NOT_FOUND)
@@ -91,12 +90,11 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
.build();
OMKeyCreateResponse omKeyCreateResponse =
- new OMKeyCreateResponse(omResponse, omKeyInfo, null, clientID,
- omVolumeArgs, omBucketInfo);
+ getOmKeyCreateResponse(omKeyInfo, omVolumeArgs, omBucketInfo,
+ omResponse);
// Before calling addToDBBatch
- String openKey = omMetadataManager.getOpenKey(volumeName, bucketName,
- keyName, clientID);
+ String openKey = getOpenKeyName();
Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
omKeyCreateResponse.checkAndUpdateDB(omMetadataManager, batchOperation);
@@ -108,4 +106,12 @@ public void testAddToDBBatchWithErrorResponse() throws Exception {
Assert.assertFalse(omMetadataManager.getOpenKeyTable().isExist(openKey));
}
+
+ @NotNull
+ protected OMKeyCreateResponse getOmKeyCreateResponse(OmKeyInfo keyInfo,
+ OmVolumeArgs volumeArgs, OmBucketInfo bucketInfo, OMResponse response) {
+
+ return new OMKeyCreateResponse(response, keyInfo, null, clientID,
+ volumeArgs, bucketInfo);
+ }
}
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
index 626a3ded67e..d29bd7eee12 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java
@@ -20,6 +20,10 @@
import java.util.UUID;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import org.jetbrains.annotations.NotNull;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
@@ -49,11 +53,13 @@ public class TestOMKeyResponse {
protected String keyName;
protected HddsProtos.ReplicationFactor replicationFactor;
protected HddsProtos.ReplicationType replicationType;
+ protected OmBucketInfo omBucketInfo;
protected long clientID;
+ protected long txnLogId = 100000L;
@Before
public void setup() throws Exception {
- OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+ OzoneConfiguration ozoneConfiguration = getOzoneConfiguration();
ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
folder.newFolder().getAbsolutePath());
omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
@@ -67,6 +73,23 @@ public void setup() throws Exception {
clientID = 1000L;
}
+ @NotNull
+ protected String getOpenKeyName() {
+ return omMetadataManager.getOpenKey(volumeName, bucketName, keyName,
+ clientID);
+ }
+
+ @NotNull
+ protected OmKeyInfo getOmKeyInfo() {
+ return TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName,
+ replicationType, replicationFactor);
+ }
+
+ @NotNull
+ protected OzoneConfiguration getOzoneConfiguration() {
+ return new OzoneConfiguration();
+ }
+
@After
public void stop() {
Mockito.framework().clearInlineMocks();
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 0b5098886f6..dbc1f627482 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -47,6 +47,7 @@
import org.apache.hadoop.ozone.client.OzoneKey;
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -521,4 +522,8 @@ private BlockLocation[] getBlockLocations(OzoneFileStatus fileStatus) {
return blockLocations;
}
+ @Override
+ public String getLayoutVersion() {
+ return bucket.getMetadata().get(OMConfigKeys.OZONE_OM_LAYOUT_VERSION);
+ }
}
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index e4acabc2144..dccc9d85203 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -42,6 +42,7 @@
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.security.UserGroupInformation;
@@ -309,6 +310,13 @@ boolean processKey(List keyList) throws IOException {
*/
@Override
public boolean rename(Path src, Path dst) throws IOException {
+
+    String layoutVersion = adapter.getLayoutVersion();
+    if (OMConfigKeys.OZONE_OM_LAYOUT_VERSION_V1
+        .equals(layoutVersion)) {
+      return renameV1(src, dst);
+    }
+
incrementCounter(Statistic.INVOCATION_RENAME, 1);
statistics.incrementWriteOps(1);
super.checkPath(src);
@@ -407,6 +415,29 @@ public boolean rename(Path src, Path dst) throws IOException {
return result;
}
+  private boolean renameV1(Path src, Path dst) throws IOException {
+    incrementCounter(Statistic.INVOCATION_RENAME, 1);
+    statistics.incrementWriteOps(1);
+    super.checkPath(src);
+    super.checkPath(dst);
+
+    String srcPath = src.toUri().getPath();
+    String dstPath = dst.toUri().getPath();
+    if (srcPath.equals(dstPath)) {
+      return true;
+    }
+
+    LOG.trace("rename() from:{} to:{}", src, dst);
+    if (src.isRoot()) {
+      // Cannot rename root of file system
+      LOG.trace("Cannot rename the root of a filesystem");
+      return false;
+    }
+
+    adapter.renameKey(srcPath, dstPath);
+    return true;
+  }
+
/**
* Intercept rename to trash calls from TrashPolicyDefault,
* convert them to delete calls instead.
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 848119d5f55..76de3962e29 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -57,6 +57,7 @@
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -1028,4 +1029,10 @@ private static FileStatusAdapter getFileStatusAdapterForRoot(URI uri) {
null, null, null, new BlockLocation[0]
);
}
+
+ @Override
+ public String getLayoutVersion() {
+ // TODO: Need to refine this part.
+ return OMConfigKeys.OZONE_OM_LAYOUT_VERSION_DEFAULT;
+ }
}
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
index 2b76c22c4d5..55d1a07809e 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
@@ -75,4 +75,5 @@ Token getDelegationToken(String renewer)
FileStatusAdapter getFileStatus(String key, URI uri,
Path qualifiedPath, String userName) throws IOException;
+ String getLayoutVersion();
}