HDFS-16710. Remove redundant throw exceptions in org.apache.hadoop.hdfs.server.namenode package #4670

Open: wants to merge 1 commit into trunk
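The signatures below change in one of two ways: a throws clause stops naming a subclass alongside its superclass (SocketTimeoutException, UnresolvedLinkException, SnapshotAccessControlException, and EditLogFileInputStream's LogHeaderCorruptException all extend IOException, directly or indirectly, so listing them next to IOException narrows nothing), or it stops naming checked exceptions the method body can no longer throw. A minimal sketch of the first case, using only JDK types; ThrowsDemo and its methods are hypothetical, not code from this patch:

import java.io.IOException;
import java.net.SocketTimeoutException;

class ThrowsDemo {
  // Redundant: SocketTimeoutException extends InterruptedIOException,
  // which extends IOException, so the extra declaration adds nothing.
  static void before() throws IOException, SocketTimeoutException {
    throw new SocketTimeoutException("read timed out");
  }

  // Equivalent signature after the cleanup; callers compile unchanged.
  static void after() throws IOException {
    throw new SocketTimeoutException("read timed out");
  }
}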
@@ -408,8 +408,7 @@ else if(!nnReg.isRole(NamenodeRole.NAMENODE)) {
   }
 
   // TODO: move to a common with DataNode util class
-  private static NamespaceInfo handshake(NamenodeProtocol namenode)
-      throws IOException, SocketTimeoutException {
+  private static NamespaceInfo handshake(NamenodeProtocol namenode) throws IOException {
     NamespaceInfo nsInfo;
     nsInfo = namenode.versionRequest(); // throws SocketTimeoutException
     String errorMsg = null;
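The inline comment in handshake() stays accurate after this change: versionRequest() can still surface a SocketTimeoutException, and because that type is an IOException subclass, callers can still catch it specifically. A sketch using only JDK types; CatchDemo and fetch() are hypothetical stand-ins for the handshake call, not code from this patch:

import java.io.IOException;
import java.net.SocketTimeoutException;

class CatchDemo {
  // Stand-in for handshake(): declares only IOException...
  static void fetch() throws IOException {
    throw new SocketTimeoutException("versionRequest timed out");
  }

  public static void main(String[] args) {
    try {
      fetch();
    } catch (SocketTimeoutException e) { // ...but the subtype is still catchable
      System.out.println("timeout: " + e.getMessage());
    } catch (IOException e) {
      System.out.println("other I/O failure: " + e.getMessage());
    }
  }
}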
@@ -531,8 +531,7 @@ private void addInternal(CacheDirective directive, CachePool pool) {
    * Adds a directive, skipping most error checking. This should only be called
    * internally in special scenarios like edit log replay.
    */
-  CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive)
-      throws InvalidRequestException {
+  CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive) {
     long id = directive.getId();
     CacheDirective entry = new CacheDirective(directive);
     CachePool pool = cachePools.get(directive.getPool());
@@ -114,7 +114,7 @@ public String toString() {
        + blockpoolID ;
   }
 
-  boolean storageVersionMatches(StorageInfo si) throws IOException {
+  boolean storageVersionMatches(StorageInfo si) {
     return (layoutVersion == si.layoutVersion) && (cTime == si.cTime);
   }
 
@@ -63,7 +63,7 @@ int length() {
     }
   }
 
-  EditLogBackupInputStream(String name) throws IOException {
+  EditLogBackupInputStream(String name) {
     address = name;
     inner = new ByteBufferInputStream();
     in = null;
@@ -148,8 +148,7 @@ private EditLogFileInputStream(LogSource log,
     this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
   }
 
-  private void init(boolean verifyLayoutVersion)
-      throws LogHeaderCorruptException, IOException {
+  private void init(boolean verifyLayoutVersion) throws IOException {
     Preconditions.checkState(state == State.UNINIT);
     BufferedInputStream bin = null;
     InputStream fStream = null;
@@ -374,7 +373,7 @@ static FSEditLogLoader.EditLogValidation scanEditLog(File file,
    */
   @VisibleForTesting
   static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion)
-      throws IOException, LogHeaderCorruptException {
+      throws IOException {
     int logVersion;
     try {
       logVersion = in.readInt();
@@ -43,7 +43,6 @@
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ReencryptionStatus;
-import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -348,8 +347,7 @@ void removeEncryptionZone(Long inodeId) {
    * <p>
    * Called while holding the FSDirectory lock.
    */
-  boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException,
-      SnapshotAccessControlException, IOException {
+  boolean isInAnEZ(INodesInPath iip) throws IOException {
     assert dir.hasReadLock();
     return (getEncryptionZoneForPath(iip) != null);
   }
@@ -272,8 +272,7 @@ static void setQuota(FSDirectory fsd, FSPermissionChecker pc, String src,
 
   static boolean unprotectedSetPermission(
       FSDirectory fsd, INodesInPath iip, FsPermission permissions)
-      throws FileNotFoundException, UnresolvedLinkException,
-      QuotaExceededException, SnapshotAccessControlException {
+      throws FileNotFoundException {
     assert fsd.hasWriteLock();
     final INode inode = FSDirectory.resolveLastINode(iip);
     int snapshotId = iip.getLatestSnapshotId();
@@ -284,8 +283,7 @@ static boolean unprotectedSetPermission(
 
   static boolean unprotectedSetOwner(
       FSDirectory fsd, INodesInPath iip, String username, String groupname)
-      throws FileNotFoundException, UnresolvedLinkException,
-      QuotaExceededException, SnapshotAccessControlException {
+      throws FileNotFoundException {
     assert fsd.hasWriteLock();
     final INode inode = FSDirectory.resolveLastINode(iip);
     long oldPerm = inode.getPermissionLong();
@@ -383,8 +381,7 @@ static INodeDirectory unprotectedSetQuota(
 
   static BlockInfo[] unprotectedSetReplication(
       FSDirectory fsd, INodesInPath iip, short replication)
-      throws QuotaExceededException, UnresolvedLinkException,
-      SnapshotAccessControlException, UnsupportedActionException {
+      throws QuotaExceededException {
     assert fsd.hasWriteLock();
 
     final BlockManager bm = fsd.getBlockManager();
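The three unprotectedSet* methods above illustrate the second group: the bodies resolve an inode and update it in memory, so FileNotFoundException (or, for unprotectedSetReplication, QuotaExceededException) is the only checked exception left to declare. One consequence worth noting: javac rejects a catch clause for a checked exception that the guarded call can no longer throw, so any call site catching one of the removed types around only these calls has to change too. A hypothetical, self-contained illustration, not code from this patch:

import java.io.FileNotFoundException;

class TightenThrowsDemo {
  // Shaped like unprotectedSetPermission after the patch: only the
  // exception the body can actually raise is declared.
  static void setPermissionLike(boolean missing) throws FileNotFoundException {
    if (missing) {
      throw new FileNotFoundException("inode not found");
    }
    // otherwise: in-memory update, nothing thrown
  }

  public static void main(String[] args) {
    try {
      setPermissionLike(true);
    } catch (FileNotFoundException e) {
      System.out.println("expected: " + e.getMessage());
    }
    // try { setPermissionLike(true); } catch (QuotaExceededException e) {}
    // would no longer compile: that catch is provably unreachable.
  }
}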
@@ -35,15 +35,13 @@
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
-import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto;
@@ -495,8 +493,7 @@ static FileEncryptionInfo getFileEncryptionInfo(final FSDirectory fsd,
    * @throws RetryStartFileException if key is inconsistent with current zone
    */
   static FileEncryptionInfo getFileEncryptionInfo(FSDirectory dir,
-      INodesInPath iip, EncryptionKeyInfo ezInfo)
-      throws RetryStartFileException, IOException {
+      INodesInPath iip, EncryptionKeyInfo ezInfo) throws IOException {
     FileEncryptionInfo feInfo = null;
     final EncryptionZone zone = getEZForPath(dir, iip);
     if (zone != null) {
@@ -519,8 +516,7 @@ static FileEncryptionInfo getFileEncryptionInfo(FSDirectory dir,
   }
 
   static boolean isInAnEZ(final FSDirectory fsd, final INodesInPath iip)
-      throws UnresolvedLinkException, SnapshotAccessControlException,
-      IOException {
+      throws IOException {
     if (!fsd.ezManager.hasCreatedEncryptionZone()) {
       return false;
     }
@@ -67,10 +67,9 @@ private FSDirErasureCodingOp() {}
    * @param fsn namespace
    * @param ecPolicyName name of EC policy to be checked
    * @return an erasure coding policy if ecPolicyName is valid and enabled
-   * @throws IOException
    */
   static ErasureCodingPolicy getEnabledErasureCodingPolicyByName(
-      final FSNamesystem fsn, final String ecPolicyName) throws IOException {
+      final FSNamesystem fsn, final String ecPolicyName) {
     assert fsn.hasReadLock();
     ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
         .getEnabledPolicyByName(ecPolicyName);
@@ -99,10 +98,9 @@ static ErasureCodingPolicy getEnabledErasureCodingPolicyByName(
    * @param fsn namespace
    * @param ecPolicyName name of EC policy to be checked
    * @return an erasure coding policy if ecPolicyName is valid
-   * @throws IOException
    */
   static ErasureCodingPolicy getErasureCodingPolicyByName(
-      final FSNamesystem fsn, final String ecPolicyName) throws IOException {
+      final FSNamesystem fsn, final String ecPolicyName) {
     assert fsn.hasReadLock();
     ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
         .getErasureCodingPolicyByName(ecPolicyName);
@@ -126,12 +124,11 @@ static ErasureCodingPolicy getErasureCodingPolicyByName(
    * @return {@link FileStatus}
    * @throws IOException
    * @throws HadoopIllegalArgumentException if the policy is not enabled
-   * @throws AccessControlException if the user does not have write access
    */
   static FileStatus setErasureCodingPolicy(final FSNamesystem fsn,
       final String srcArg, final String ecPolicyName,
       final FSPermissionChecker pc, final boolean logRetryCache)
-      throws IOException, AccessControlException {
+      throws IOException {
     assert fsn.hasWriteLock();
 
     String src = srcArg;
@@ -348,11 +345,9 @@ private static List<XAttr> removeErasureCodingPolicyXAttr(
    *         been set or the policy is REPLICATION
    * @throws IOException
    * @throws FileNotFoundException if the path does not exist.
-   * @throws AccessControlException if no read access
    */
   static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn,
-      final String src, FSPermissionChecker pc)
-      throws IOException, AccessControlException {
+      final String src, FSPermissionChecker pc) throws IOException {
     assert fsn.hasReadLock();
 
     if (FSDirectory.isExactReservedName(src)) {
@@ -439,8 +434,7 @@ static ErasureCodingPolicyInfo[] getErasureCodingPolicies(
    * @param fsn namespace
    * @return {@link java.util.HashMap} array
    */
-  static Map<String, String> getErasureCodingCodecs(final FSNamesystem fsn)
-      throws IOException {
+  static Map<String, String> getErasureCodingCodecs(final FSNamesystem fsn) {
     assert fsn.hasReadLock();
     return CodecRegistry.getInstance().getCodec2CoderCompactMap();
   }
@@ -485,8 +479,7 @@ private static ErasureCodingPolicy getErasureCodingPolicyForPath(
     return null;
   }
 
-  private static XAttr getErasureCodingPolicyXAttrForINode(
-      FSNamesystem fsn, INode inode) throws IOException {
+  private static XAttr getErasureCodingPolicyXAttrForINode(FSNamesystem fsn, INode inode) {
     // INode can be null
     if (inode == null) {
       return null;
@@ -132,8 +132,7 @@ static INodesInPath renameForEditLog(FSDirectory fsd, String src, String dst,
 
   // if destination is a directory, append source child's name, else return
   // iip as-is.
-  private static INodesInPath dstForRenameTo(
-      INodesInPath srcIIP, INodesInPath dstIIP) throws IOException {
+  private static INodesInPath dstForRenameTo(INodesInPath srcIIP, INodesInPath dstIIP) {
     INode dstINode = dstIIP.getLastINode();
     if (dstINode != null && dstINode.isDirectory()) {
       byte[] childName = srcIIP.getLastLocalName();
@@ -22,13 +22,11 @@
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -70,7 +68,7 @@ static TruncateResult truncate(final FSNamesystem fsn, final String srcArg,
       final long newLength, final String clientName,
       final String clientMachine, final long mtime,
       final BlocksMapUpdateInfo toRemoveBlocks, final FSPermissionChecker pc)
-      throws IOException, UnresolvedLinkException {
+      throws IOException {
     assert fsn.hasWriteLock();
 
     FSDirectory fsd = fsn.getFSDirectory();
@@ -173,8 +171,7 @@ static void unprotectedTruncate(final FSNamesystem fsn,
       final INodesInPath iip,
       final String clientName, final String clientMachine,
       final long newLength, final long mtime, final Block truncateBlock)
-      throws UnresolvedLinkException, QuotaExceededException,
-      SnapshotAccessControlException, IOException {
+      throws IOException {
     assert fsn.hasWriteLock();
 
     FSDirectory fsd = fsn.getFSDirectory();
@@ -410,7 +410,7 @@ static List<XAttr> setINodeXAttrs(
   }
 
   static XAttr getXAttrByPrefixedName(FSDirectory fsd, INodesInPath iip,
-      String prefixedName) throws IOException {
+      String prefixedName) {
     fsd.readLock();
     try {
       return XAttrStorage.readINodeXAttrByPrefixedName(iip.getLastINode(),
@@ -421,8 +421,7 @@ static XAttr getXAttrByPrefixedName(FSDirectory fsd, INodesInPath iip,
   }
 
   static XAttr unprotectedGetXAttrByPrefixedName(
-      INode inode, int snapshotId, String prefixedName)
-      throws IOException {
+      INode inode, int snapshotId, String prefixedName) {
     return XAttrStorage.readINodeXAttrByPrefixedName(
         inode, snapshotId, prefixedName);
   }
@@ -811,10 +811,8 @@ public boolean isNonEmptyDirectory(INodesInPath inodesInPath) {
 
   /**
    * Check whether the filepath could be created
-   * @throws SnapshotAccessControlException if path is in RO snapshot
    */
-  boolean isValidToCreate(String src, INodesInPath iip)
-      throws SnapshotAccessControlException {
+  boolean isValidToCreate(String src, INodesInPath iip) {
     String srcs = normalizePath(src);
     return srcs.startsWith("/") && !srcs.endsWith("/") &&
         iip.getLastINode() == null;
@@ -1788,7 +1786,7 @@ private static byte[][] constructRemainingPath(byte[][] components,
     return components;
   }
 
-  INode getINode4DotSnapshot(INodesInPath iip) throws UnresolvedLinkException {
+  INode getINode4DotSnapshot(INodesInPath iip) {
     Preconditions.checkArgument(
         iip.isDotSnapshotDir(), "%s does not end with %s",
         iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
@@ -1285,7 +1285,7 @@ void purgeOldStorage(NameNodeFile nnf) {
    * Rename FSImage with the specific txid
    */
   private void renameCheckpoint(long txid, NameNodeFile fromNnf,
-      NameNodeFile toNnf, boolean renameMD5) throws IOException {
+      NameNodeFile toNnf, boolean renameMD5) {
     ArrayList<StorageDirectory> al = null;
 
     for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
@@ -73,7 +73,7 @@ private Matcher matchPattern(String name) {
   }
 
   @Override
-  public void inspectDirectory(StorageDirectory sd) throws IOException {
+  public void inspectDirectory(StorageDirectory sd) {
     // Was the directory just formatted?
     if (!sd.getVersionFile().exists()) {
       LOG.info("No version file in " + sd.getRoot());
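inspectDirectory() shows a further wrinkle: it is an @Override, and Java lets an overriding method declare fewer checked exceptions than the method it overrides, so dropping IOException here is legal even if the parent declaration keeps it. A minimal sketch; Inspector and DiskInspector are hypothetical, not types from this patch:

import java.io.IOException;

interface Inspector {
  void inspectDirectory(String dir) throws IOException;
}

class DiskInspector implements Inspector {
  // Legal: an override may narrow (or drop) the checked exceptions
  // declared by the method it overrides.
  @Override
  public void inspectDirectory(String dir) {
    System.out.println("inspecting " + dir);
  }
}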
@@ -252,7 +252,6 @@
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
 import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
-import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
@@ -2344,8 +2343,7 @@ void setTimes(String src, long mtime, long atime) throws IOException {
    * false if client needs to wait for block recovery.
    */
   boolean truncate(String src, long newLength, String clientName,
-      String clientMachine, long mtime) throws IOException,
-      UnresolvedLinkException {
+      String clientMachine, long mtime) throws IOException {
 
     final String operationName = "truncate";
     requireEffectiveLayoutVersionForFeature(Feature.TRUNCATE);
@@ -2553,8 +2551,7 @@ void satisfyStoragePolicy(String src, boolean logRetryCache)
     logAuditEvent(true, operationName, src, null, auditStat);
   }
 
-  private void validateStoragePolicySatisfy()
-      throws UnsupportedActionException, IOException {
+  private void validateStoragePolicySatisfy() throws IOException {
     // checks sps status
     boolean disabled = (blockManager.getSPSManager() == null);
     if (disabled) {
@@ -2660,8 +2657,7 @@ long getPreferredBlockSize(String src) throws IOException {
    */
   CryptoProtocolVersion chooseProtocolVersion(
       EncryptionZone zone, CryptoProtocolVersion[] supportedVersions)
-      throws UnknownCryptoProtocolVersionException, UnresolvedLinkException,
-      SnapshotAccessControlException {
+      throws UnknownCryptoProtocolVersionException {
     Preconditions.checkNotNull(zone);
     Preconditions.checkNotNull(supportedVersions);
     // Right now, we only support a single protocol version,
@@ -8049,8 +8045,7 @@ void createEncryptionZone(final String src, final String keyName,
    * @throws AccessControlException if the caller is not the superuser.
    * @throws UnresolvedLinkException if the path can't be resolved.
    */
-  EncryptionZone getEZForPath(final String srcArg)
-      throws AccessControlException, UnresolvedLinkException, IOException {
+  EncryptionZone getEZForPath(final String srcArg) throws IOException {
     final String operationName = "getEZForPath";
     FileStatus resultingStat = null;
     EncryptionZone encryptionZone;
@@ -8367,9 +8362,8 @@ boolean disableErasureCodingPolicy(String ecPolicyName,
    * @throws UnresolvedLinkException if the path can't be resolved.
    * @throws SafeModeException if the Namenode is in safe mode.
    */
-  void unsetErasureCodingPolicy(final String srcArg,
-      final boolean logRetryCache) throws IOException,
-      UnresolvedLinkException, SafeModeException, AccessControlException {
+  void unsetErasureCodingPolicy(final String srcArg, final boolean logRetryCache)
+      throws IOException, SafeModeException, AccessControlException {
     final String operationName = "unsetErasureCodingPolicy";
     checkOperation(OperationCategory.WRITE);
     checkErasureCodingSupported(operationName);
@@ -8435,8 +8429,7 @@ public ECTopologyVerifierResult getECTopologyResultForPolicies(
   /**
    * Get the erasure coding policy information for specified path.
    */
-  ErasureCodingPolicy getErasureCodingPolicy(String src)
-      throws AccessControlException, UnresolvedLinkException, IOException {
+  ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException {
     final String operationName = "getErasureCodingPolicy";
     boolean success = false;
     checkOperation(OperationCategory.READ);