HDFS-7720. Quota by Storage Type API, tools and ClientNameNode Protocol changes. (Contributed by Xiaoyu Yao)
arp7 committed Feb 7, 2015
1 parent da2fb2b commit 8de80ff
Showing 14 changed files with 251 additions and 27 deletions.
3 changes: 3 additions & 0 deletions hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -20,6 +20,9 @@ Trunk (Unreleased)

HDFS-3689. Add support for variable length block. (jing9)

HDFS-7584. Quota by Storage Type API, tools and ClientNameNode Protocol
changes. (Xiaoyu Yao via Arpit Agarwal)

IMPROVEMENTS

HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.
@@ -167,6 +167,7 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
@@ -3026,7 +3027,7 @@ ContentSummary getContentSummary(String src) throws IOException {

/**
* Sets or resets quotas for a directory.
* @see ClientProtocol#setQuota(String, long, long)
* @see ClientProtocol#setQuota(String, long, long, StorageType)
*/
void setQuota(String src, long namespaceQuota, long diskspaceQuota)
throws IOException {
@@ -3042,7 +3043,8 @@ void setQuota(String src, long namespaceQuota, long diskspaceQuota)
}
TraceScope scope = getPathTraceScope("setQuota", src);
try {
namenode.setQuota(src, namespaceQuota, diskspaceQuota);
// Pass null as storage type for traditional space/namespace quota.
namenode.setQuota(src, namespaceQuota, diskspaceQuota, null);
} catch(RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
@@ -3055,6 +3057,34 @@ void setQuota(String src, long namespaceQuota, long diskspaceQuota)
}
}

/**
* Sets or resets quotas by storage type for a directory.
* @see ClientProtocol#setQuota(String, long, long, StorageType)
*/
void setQuotaByStorageType(String src, StorageType type, long spaceQuota)
throws IOException {
if (spaceQuota <= 0 && spaceQuota != HdfsConstants.QUOTA_DONT_SET &&
spaceQuota != HdfsConstants.QUOTA_RESET) {
throw new IllegalArgumentException("Invalid values for quota :" +
spaceQuota);
}
if (type == null) {
throw new IllegalArgumentException("Invalid storage type(null)");
}
if (!type.supportTypeQuota()) {
throw new IllegalArgumentException("Don't support Quota for storage type : "
+ type.toString());
}
try {
namenode.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota, type);
} catch (RemoteException re) {
throw re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
QuotaByStorageTypeExceededException.class,
UnresolvedPathException.class,
SnapshotAccessControlException.class);
}
}
/**
* set the modification and access time of a file
*
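For reference, the new client-side setQuotaByStorageType above only accepts a positive value or one of the two quota sentinels, and only for a non-transient storage type. A minimal sketch that mirrors that guard (the class and helper names are invented for illustration):

import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class TypeQuotaGuardSketch {
  // Mirrors the argument checks in the client method above.
  static boolean isAcceptable(StorageType type, long spaceQuota) {
    boolean validValue = spaceQuota > 0
        || spaceQuota == HdfsConstants.QUOTA_DONT_SET
        || spaceQuota == HdfsConstants.QUOTA_RESET;
    return validValue && type != null && type.supportTypeQuota();
  }

  public static void main(String[] args) {
    System.out.println(isAcceptable(StorageType.SSD, 1024));      // true
    System.out.println(isAcceptable(StorageType.RAM_DISK, 1024)); // false: RAM_DISK is transient
    System.out.println(isAcceptable(StorageType.DISK,
        HdfsConstants.QUOTA_RESET));                              // true: clears the type quota
  }
}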
@@ -605,6 +605,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_STORAGE_POLICY_ENABLED_KEY = "dfs.storage.policy.enabled";
public static final boolean DFS_STORAGE_POLICY_ENABLED_DEFAULT = true;

public static final String DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY = "dfs.quota.by.storage.type.enabled";
public static final boolean DFS_QUOTA_BY_STORAGETYPE_ENABLED_DEFAULT = true;

// HA related configuration
public static final String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
public static final String DFS_HA_NAMENODE_ID_KEY = "dfs.ha.namenode.id";
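The two new keys above only declare the switch; a hedged sketch of how code might read it through the standard Configuration API (no hdfs-default.xml entry is assumed here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class QuotaByTypeConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration(); // loads hdfs-site.xml if present
    boolean enabled = conf.getBoolean(
        DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY,
        DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_DEFAULT);
    System.out.println("dfs.quota.by.storage.type.enabled = " + enabled);
  }
}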
@@ -688,7 +688,7 @@ public ContentSummary next(final FileSystem fs, final Path p)
}

/** Set a directory's quotas
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long, StorageType)
*/
public void setQuota(Path src, final long namespaceQuota,
final long diskspaceQuota) throws IOException {
@@ -710,6 +710,35 @@ public Void next(final FileSystem fs, final Path p)
}.resolve(this, absF);
}

/**
* Set the per type storage quota of a directory.
*
* @param src target directory whose quota is to be modified.
* @param type storage type of the specific storage type quota to be modified.
* @param spaceQuota value of the specific storage type quota to be modified.
* Maybe {@link HdfsConstants#QUOTA_RESET} to clear quota by storage type.
*/
public void setQuotaByStorageType(
Path src, final StorageType type, final long spaceQuota)
throws IOException {
Path absF = fixRelativePart(src);
new FileSystemLinkResolver<Void>() {
@Override
public Void doCall(final Path p)
throws IOException, UnresolvedLinkException {
dfs.setQuotaByStorageType(getPathName(p), type, spaceQuota);
return null;
}
@Override
public Void next(final FileSystem fs, final Path p)
throws IOException {
// setQuotaByStorageType is not defined in FileSystem, so we only can resolve
// within this DFS
return doCall(p);
}
}.resolve(this, absF);
}

private FileStatus[] listStatusInternal(Path p) throws IOException {
String src = getPathName(p);

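A minimal usage sketch of the new DistributedFileSystem#setQuotaByStorageType above, assuming fs.defaultFS points at an HDFS cluster that carries this change on both client and server; the directory and quota values are invented:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SetTypeQuotaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    Path dir = new Path("/data");                       // hypothetical directory
    // Cap SSD usage under /data at 10 GB.
    dfs.setQuotaByStorageType(dir, StorageType.SSD, 10L * 1024 * 1024 * 1024);
    // Later, clear the SSD quota again with the QUOTA_RESET sentinel.
    dfs.setQuotaByStorageType(dir, StorageType.SSD, HdfsConstants.QUOTA_RESET);
  }
}

Note that, as the comment in next() says, the call cannot follow symlinks out of this DistributedFileSystem because FileSystem itself has no setQuotaByStorageType.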
@@ -45,12 +45,18 @@ public enum StorageType {

private static final StorageType[] VALUES = values();

StorageType(boolean isTransient) { this.isTransient = isTransient; }
StorageType(boolean isTransient) {
this.isTransient = isTransient;
}

public boolean isTransient() {
return isTransient;
}

public boolean supportTypeQuota() {
return !isTransient;
}

public boolean isMovable() {
return !isTransient;
}
@@ -60,12 +66,28 @@ public static List<StorageType> asList() {
}

public static List<StorageType> getMovableTypes() {
List<StorageType> movableTypes = new ArrayList<StorageType>();
return getNonTransientTypes();
}

public static List<StorageType> getTypesSupportingQuota() {
return getNonTransientTypes();
}

public static StorageType parseStorageType(int i) {
return VALUES[i];
}

public static StorageType parseStorageType(String s) {
return StorageType.valueOf(s.toUpperCase());
}

private static List<StorageType> getNonTransientTypes() {
List<StorageType> nonTransientTypes = new ArrayList<>();
for (StorageType t : VALUES) {
if ( t.isTransient == false ) {
movableTypes.add(t);
nonTransientTypes.add(t);
}
}
return movableTypes;
return nonTransientTypes;
}
}
}
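A small sketch of the StorageType helpers touched above; the printed list order follows the enum declaration and is not asserted here:

import org.apache.hadoop.hdfs.StorageType;

public class StorageTypeHelpersSketch {
  public static void main(String[] args) {
    // Transient storage cannot carry a per-type quota.
    System.out.println(StorageType.RAM_DISK.supportTypeQuota()); // false
    System.out.println(StorageType.SSD.supportTypeQuota());      // true

    // parseStorageType(String) upper-cases its argument, so "ssd" is accepted.
    System.out.println(StorageType.parseStorageType("ssd"));     // SSD

    // Every non-transient type is eligible for quota by storage type.
    System.out.println(StorageType.getTypesSupportingQuota());
  }
}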
@@ -31,6 +31,7 @@
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
@@ -116,6 +117,32 @@ public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
public void clearSpaceQuota(Path src) throws IOException {
dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
}

/**
* Set the quota by storage type for a directory. Note that
* directories and sym links do not occupy disk space.
*
* @param src the target directory to set the quota by storage type
* @param type the storage type to set for quota by storage type
* @param spaceQuota the value to set for quota by storage type
* @throws IOException in the event of error
*/
public void setQuotaByStorageType(Path src, StorageType type, long spaceQuota)
throws IOException {
dfs.setQuotaByStorageType(src, type, spaceQuota);
}

/**
* Clear the space quota by storage type for a directory. Note that
* directories and sym links do not occupy disk space.
*
* @param src the target directory to clear the quota by storage type
* @param type the storage type to clear for quota by storage type
* @throws IOException in the event of error
*/
public void clearQuotaByStorageType(Path src, StorageType type) throws IOException {
dfs.setQuotaByStorageType(src, type, HdfsConstants.QUOTA_RESET);
}

/**
* Allow snapshot on a directory.
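A hedged sketch of the admin-facing wrappers above via HdfsAdmin; the URI comes from the default filesystem and the directory, type, and value are invented:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class AdminTypeQuotaSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    URI nn = FileSystem.getDefaultUri(conf);      // must be an hdfs:// URI
    HdfsAdmin admin = new HdfsAdmin(nn, conf);
    Path dir = new Path("/archive");              // hypothetical directory
    // Cap ARCHIVE usage under /archive at 50 GB, then clear it again.
    admin.setQuotaByStorageType(dir, StorageType.ARCHIVE, 50L * 1024 * 1024 * 1024);
    admin.clearQuotaByStorageType(dir, StorageType.ARCHIVE);
  }
}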
@@ -51,6 +51,7 @@
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.AtMostOnce;
@@ -951,7 +952,9 @@ public ContentSummary getContentSummary(String path)
* @param namespaceQuota Limit on the number of names in the tree rooted
* at the directory
* @param diskspaceQuota Limit on disk space occupied all the files under
* this directory.
* this directory.
* @param type StorageType that the space quota is intended to be set on.
* It may be null when called by traditional space/namespace quota.
* <br><br>
*
* The quota can have three types of values : (1) 0 or more will set
@@ -968,8 +971,8 @@ public ContentSummary getContentSummary(String path)
* @throws IOException If an I/O error occurred
*/
@Idempotent
public void setQuota(String path, long namespaceQuota, long diskspaceQuota)
throws AccessControlException, FileNotFoundException,
public void setQuota(String path, long namespaceQuota, long diskspaceQuota,
StorageType type) throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, SnapshotAccessControlException, IOException;

/**
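The nullable type parameter keeps the single setQuota RPC serving both the traditional and the per-type case. A sketch of the calling conventions, with "namenode" standing in for a ClientProtocol proxy obtained elsewhere and the path and numbers invented:

import java.io.IOException;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class SetQuotaCallSketch {
  static void examples(ClientProtocol namenode) throws IOException {
    // Traditional call: set only the namespace quota, leave disk space
    // untouched, and pass null because no storage type is targeted.
    namenode.setQuota("/data", 100000L, HdfsConstants.QUOTA_DONT_SET, null);

    // Per-type call: leave the namespace quota untouched and cap SSD usage.
    namenode.setQuota("/data", HdfsConstants.QUOTA_DONT_SET,
        10L * 1024 * 1024 * 1024, StorageType.SSD);
  }
}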
@@ -0,0 +1,56 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.StorageType;

import static org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix.long2String;

@InterfaceAudience.Private
@InterfaceStability.Evolving
public class QuotaByStorageTypeExceededException extends QuotaExceededException {
protected static final long serialVersionUID = 1L;
protected StorageType type;

public QuotaByStorageTypeExceededException() {}

public QuotaByStorageTypeExceededException(String msg) {
super(msg);
}

public QuotaByStorageTypeExceededException(long quota, long count, StorageType type) {
super(quota, count);
this.type = type;
}

@Override
public String getMessage() {
String msg = super.getMessage();
if (msg == null) {
return "Quota by storage type : " + type.toString() +
" on path : " + (pathName==null ? "": pathName) +
" is exceeded. quota = " + long2String(quota, "B", 2) +
" but space consumed = " + long2String(count, "B", 2);
} else {
return msg;
}
}
}
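A minimal sketch that constructs the new exception directly just to show the message format; the numbers are invented and the path prints empty because setPathName is never called here:

import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.protocol.QuotaByStorageTypeExceededException;

public class TypeQuotaMessageSketch {
  public static void main(String[] args) {
    QuotaByStorageTypeExceededException e =
        new QuotaByStorageTypeExceededException(
            10L * 1024 * 1024 * 1024,   // quota: 10 GB
            12L * 1024 * 1024 * 1024,   // space consumed: 12 GB
            StorageType.SSD);
    // Prints something like:
    // Quota by storage type : SSD on path :  is exceeded. quota = 10 GB but space consumed = 12 GB
    System.out.println(e.getMessage());
  }
}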
@@ -887,7 +887,9 @@ public SetQuotaResponseProto setQuota(RpcController controller,
SetQuotaRequestProto req) throws ServiceException {
try {
server.setQuota(req.getPath(), req.getNamespaceQuota(),
req.getDiskspaceQuota());
req.getDiskspaceQuota(),
req.hasStorageType() ?
PBHelper.convertStorageType(req.getStorageType()): null);
return VOID_SETQUOTA_RESPONSE;
} catch (IOException e) {
throw new ServiceException(e);
@@ -172,6 +172,7 @@
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufHelper;
@@ -805,14 +806,19 @@ public ContentSummary getContentSummary(String path)
}

@Override
public void setQuota(String path, long namespaceQuota, long diskspaceQuota)
public void setQuota(String path, long namespaceQuota, long diskspaceQuota,
StorageType type)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
SetQuotaRequestProto req = SetQuotaRequestProto.newBuilder()
final SetQuotaRequestProto.Builder builder
= SetQuotaRequestProto.newBuilder()
.setPath(path)
.setNamespaceQuota(namespaceQuota)
.setDiskspaceQuota(diskspaceQuota)
.build();
.setDiskspaceQuota(diskspaceQuota);
if (type != null) {
builder.setStorageType(PBHelper.convertStorageType(type));
}
final SetQuotaRequestProto req = builder.build();
try {
rpcProxy.setQuota(null, req);
} catch (ServiceException e) {
@@ -68,6 +68,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.StorageType;
import org.apache.hadoop.hdfs.inotify.EventBatch;
import org.apache.hadoop.hdfs.inotify.EventBatchList;
import org.apache.hadoop.hdfs.protocol.AclException;
@@ -1191,9 +1192,14 @@ public ContentSummary getContentSummary(String path) throws IOException {
}

@Override // ClientProtocol
public void setQuota(String path, long namespaceQuota, long diskspaceQuota)
public void setQuota(String path, long namespaceQuota, long diskspaceQuota,
StorageType type)
throws IOException {
checkNNStartup();
if (type != null) {
throw new UnsupportedActionException(
"Quota by storage type support is not fully supported by namenode yet.");
}
namesystem.setQuota(path, namespaceQuota, diskspaceQuota);
}

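At this revision the RPC server still rejects any non-null storage type, since FSNamesystem-side enforcement arrives in a follow-up change. A sketch of what a caller would likely see against a NameNode built at exactly this commit; the path and value are invented and the exact exception wrapping is an assumption:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.StorageType;

public class NotYetSupportedSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();     // fs.defaultFS must be hdfs://
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    try {
      dfs.setQuotaByStorageType(new Path("/data"), StorageType.SSD, 1024L * 1024);
    } catch (IOException e) {
      // Expected to surface the UnsupportedActionException thrown above,
      // wrapped in a RemoteException on the client side.
      System.err.println("Quota by storage type not enforced yet: " + e.getMessage());
    }
  }
}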
