diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 2817b66c379ff..84fb12c003933 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -17,11 +17,28 @@
  */
 package org.apache.hadoop.hdfs;
 
+import com.google.common.base.Joiner;
+import com.google.common.collect.Maps;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.web.WebHdfsConstants;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.StringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.UnsupportedEncodingException;
+import java.net.InetSocketAddress;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
+import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_NAMESERVICES;
 
 public class DFSUtilClient {
+  private static final Logger LOG = LoggerFactory.getLogger(
+      DFSUtilClient.class);
   /**
    * Converts a byte array to a string using UTF8 encoding.
    */
@@ -44,6 +61,58 @@ public static String percent2String(double percentage) {
     return StringUtils.format("%.2f%%", percentage);
   }
 
+  /**
+   * Returns collection of nameservice Ids from the configuration.
+   * @param conf configuration
+   * @return collection of nameservice Ids, or null if not specified
+   */
+  public static Collection<String> getNameServiceIds(Configuration conf) {
+    return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
+  }
+
+  /**
+   * Namenode HighAvailability related configuration.
+   * Returns collection of namenode Ids from the configuration. One logical id
+   * for each namenode in the HA setup.
+   *
+   * @param conf configuration
+   * @param nsId the nameservice ID to look at, or null for non-federated
+   * @return collection of namenode Ids
+   */
+  public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
+    String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId);
+    return conf.getTrimmedStringCollection(key);
+  }
+
+  /** Add non empty and non null suffix to a key */
+  static String addSuffix(String key, String suffix) {
+    if (suffix == null || suffix.isEmpty()) {
+      return key;
+    }
+    assert !suffix.startsWith(".") :
+        "suffix '" + suffix + "' should not already have '.' prepended.";
+    return key + "." + suffix;
+  }
+
+  /**
+   * Returns the HA NN HTTP addresses from the configuration, keyed by
+   * nameservice Id and then namenode Id.
+   *
+   * @return map of nameservice Id to map of namenode Id to InetSocketAddress
+   */
+  public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
+      Configuration conf, String scheme) {
+    if (WebHdfsConstants.WEBHDFS_SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          HdfsClientConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
+    } else if (WebHdfsConstants.SWEBHDFS_SCHEME.equals(scheme)) {
+      return getAddresses(conf, null,
+          HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
+    } else {
+      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
+    }
+  }
+
   /**
    * Decode a specific range of bytes of the given byte array to a string
    * using UTF8.
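The helpers added above resolve HA naming configuration: `getNameServiceIds` reads `dfs.nameservices`, and `getNameNodeIds` reads `dfs.ha.namenodes.<nsId>`, with `addSuffix` building the per-nameservice key. A minimal sketch of how a client could exercise them, assuming the patched `DFSUtilClient` is on the classpath; the nameservice and namenode IDs are illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;

public class NameserviceLookupSketch {
  public static void main(String[] args) {
    // Illustrative HA configuration: one nameservice "ns1" with two namenodes.
    Configuration conf = new Configuration(false);
    conf.set("dfs.nameservices", "ns1");
    conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");

    // getNameNodeIds looks up dfs.ha.namenodes.<nsId>, built via addSuffix().
    for (String nsId : DFSUtilClient.getNameServiceIds(conf)) {
      System.out.println(nsId + " -> " + DFSUtilClient.getNameNodeIds(conf, nsId));
    }
    // Prints something like: ns1 -> [nn1, nn2]
  }
}
```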
@@ -62,4 +131,107 @@ private static String bytes2String(byte[] bytes, int offset, int length) {
     return null;
   }
 
+  /**
+   * @return coll if it is non-null and non-empty. Otherwise,
+   * returns a list with a single null value.
+   */
+  static <T> Collection<T> emptyAsSingletonNull(Collection<T> coll) {
+    if (coll == null || coll.isEmpty()) {
+      return Collections.singletonList(null);
+    } else {
+      return coll;
+    }
+  }
+
+  /** Concatenate list of suffix strings '.' separated */
+  static String concatSuffixes(String... suffixes) {
+    if (suffixes == null) {
+      return null;
+    }
+    return Joiner.on(".").skipNulls().join(suffixes);
+  }
+
+  /**
+   * Returns the configured address for all NameNodes in the cluster.
+   * @param conf configuration
+   * @param defaultAddress default address to return in case key is not found.
+   * @param keys Set of keys to look for in the order of preference
+   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
+   */
+  static Map<String, Map<String, InetSocketAddress>>
+      getAddresses(Configuration conf, String defaultAddress, String... keys) {
+    Collection<String> nameserviceIds = getNameServiceIds(conf);
+    return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys);
+  }
+
+  /**
+   * Returns the configured address for all NameNodes in the cluster.
+   * @param conf configuration
+   * @param defaultAddress default address to return in case key is not found.
+   * @param keys Set of keys to look for in the order of preference
+   *
+   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
+   */
+  static Map<String, Map<String, InetSocketAddress>>
+      getAddressesForNsIds(
+          Configuration conf, Collection<String> nsIds, String defaultAddress,
+          String... keys) {
+    // Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
+    // across all of the configured nameservices and namenodes.
+    Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap();
+    for (String nsId : emptyAsSingletonNull(nsIds)) {
+      Map<String, InetSocketAddress> isas =
+          getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
+      if (!isas.isEmpty()) {
+        ret.put(nsId, isas);
+      }
+    }
+    return ret;
+  }
+
+  static Map<String, InetSocketAddress> getAddressesForNameserviceId(
+      Configuration conf, String nsId, String defaultValue, String... keys) {
+    Collection<String> nnIds = getNameNodeIds(conf, nsId);
+    Map<String, InetSocketAddress> ret = Maps.newHashMap();
+    for (String nnId : emptyAsSingletonNull(nnIds)) {
+      String suffix = concatSuffixes(nsId, nnId);
+      String address = getConfValue(defaultValue, suffix, conf, keys);
+      if (address != null) {
+        InetSocketAddress isa = NetUtils.createSocketAddr(address);
+        if (isa.isUnresolved()) {
+          LOG.warn("Namenode for " + nsId +
+              " remains unresolved for ID " + nnId +
+              ". Check your hdfs-site.xml file to " +
+              "ensure namenodes are configured properly.");
+        }
+        ret.put(nnId, isa);
+      }
+    }
+    return ret;
+  }
+
+  /**
+   * Given a list of keys in the order of preference, returns a value
+   * for the key in the given order from the configuration.
+   * @param defaultValue default value to return, when key was not found
+   * @param keySuffix suffix to add to the key, if it is not null
+   * @param conf Configuration
+   * @param keys list of keys in the order of preference
+   * @return value of the key or default if a key was not found in configuration
+   */
+  private static String getConfValue(String defaultValue, String keySuffix,
+      Configuration conf, String... keys) {
+    String value = null;
+    for (String key : keys) {
+      key = addSuffix(key, keySuffix);
+      value = conf.get(key);
+      if (value != null) {
+        break;
+      }
+    }
+    if (value == null) {
+      value = defaultValue;
+    }
+    return value;
+  }
 }
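Together, `getAddresses`, `getAddressesForNameserviceId` and `getConfValue` resolve per-namenode addresses by suffixing each candidate key with `<nsId>.<nnId>` and taking the first key that is set. A hedged sketch of the end-to-end lookup through the public `getHaNnWebHdfsAddresses` entry point, assuming the patched class is available; the hostnames and IDs are made up for illustration:

```java
import java.net.InetSocketAddress;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;

public class HaWebHdfsAddressSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // Illustrative HA config: nameservice "ns1" with namenodes "nn1" and "nn2".
    conf.set("dfs.nameservices", "ns1");
    conf.set("dfs.ha.namenodes.ns1", "nn1,nn2");
    // Each base key is resolved by appending ".<nsId>.<nnId>".
    conf.set("dfs.namenode.http-address.ns1.nn1", "machine1.example.com:50070");
    conf.set("dfs.namenode.http-address.ns1.nn2", "machine2.example.com:50070");

    Map<String, Map<String, InetSocketAddress>> addrs =
        DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");
    // Prints the resolved socket address for ns1/nn1,
    // e.g. machine1.example.com:50070.
    System.out.println(addrs.get("ns1").get("nn1"));
  }
}
```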
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
new file mode 100644
index 0000000000000..7956838500543
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/HAUtilClient.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
+
+import java.net.URI;
+
+import static org.apache.hadoop.hdfs.protocol.HdfsConstantsClient.HA_DT_SERVICE_PREFIX;
+
+@InterfaceAudience.Private
+public class HAUtilClient {
+  /**
+   * @return true if the given nameNodeUri appears to be a logical URI.
+   */
+  public static boolean isLogicalUri(
+      Configuration conf, URI nameNodeUri) {
+    String host = nameNodeUri.getHost();
+    // A logical name must be one of the service IDs.
+    return DFSUtilClient.getNameServiceIds(conf).contains(host);
+  }
+
+  /**
+   * Check whether the client has a failover proxy provider configured
+   * for the namenode/nameservice.
+   *
+   * @param conf Configuration
+   * @param nameNodeUri The URI of namenode
+   * @return true if failover is configured.
+   */
+  public static boolean isClientFailoverConfigured(
+      Configuration conf, URI nameNodeUri) {
+    String host = nameNodeUri.getHost();
+    String configKey = HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX
+        + "." + host;
+    return conf.get(configKey) != null;
+  }
+
+  /**
+   * Get the service name used in the delegation token for the given logical
+   * HA service.
+   * @param uri the logical URI of the cluster
+   * @param scheme the scheme of the corresponding FileSystem
+   * @return the service name
+   */
+  public static Text buildTokenServiceForLogicalUri(final URI uri,
+      final String scheme) {
+    return new Text(buildTokenServicePrefixForLogicalUri(scheme)
+        + uri.getHost());
+  }
+
+  public static String buildTokenServicePrefixForLogicalUri(String scheme) {
+    return HA_DT_SERVICE_PREFIX + scheme + ":";
+  }
+
+  /**
+   * Parse the file system URI out of the provided token.
+   */
+  public static URI getServiceUriFromToken(final String scheme, Token<?> token) {
+    String tokStr = token.getService().toString();
+    final String prefix = buildTokenServicePrefixForLogicalUri(
+        scheme);
+    if (tokStr.startsWith(prefix)) {
+      tokStr = tokStr.replaceFirst(prefix, "");
+    }
+    return URI.create(scheme + "://" + tokStr);
+  }
+
+  /**
+   * @return true if this token corresponds to a logical nameservice
+   * rather than a specific namenode.
+   */
+  public static boolean isTokenForLogicalUri(Token<?> token) {
+    return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index f9965b4fefd74..d11922d09e093 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -31,6 +31,12 @@ public interface HdfsClientConfigKeys {
       "^(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?(,(default:)?(user|group|mask|other):[[A-Za-z_][A-Za-z0-9._-]]*:([rwx-]{3})?)*$";
   static final String PREFIX = "dfs.client.";
 
+  String DFS_NAMESERVICES = "dfs.nameservices";
+  int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070;
+  String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address";
+  int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470;
+  String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address";
+  String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes";
 
   /** dfs.client.retry configuration properties */
   interface Retry {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
index 40c320366c4eb..ab4310e0813df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstantsClient.java
@@ -32,4 +32,10 @@ public interface HdfsConstantsClient {
    */
   long GRANDFATHER_INODE_ID = 0;
   byte BLOCK_STORAGE_POLICY_ID_UNSPECIFIED = 0;
+  /**
+   * A prefix put before the namenode URI inside the "service" field
+   * of a delegation token, indicating that the URI is a logical (HA)
+   * URI.
+ */ + String HA_DT_SERVICE_PREFIX = "ha-"; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java index 25f3cfde08f26..50da8998264cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsConstants.java @@ -23,7 +23,8 @@ @InterfaceAudience.Private public class WebHdfsConstants { - /** Delegation token kind */ + public static final String WEBHDFS_SCHEME = "webhdfs"; + public static final String SWEBHDFS_SCHEME = "swebhdfs"; public static final Text WEBHDFS_TOKEN_KIND = new Text("WEBHDFS delegation"); public static final Text SWEBHDFS_TOKEN_KIND = new Text("SWEBHDFS delegation"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index e162d286406c9..03c52282a1085 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -455,6 +455,9 @@ Release 2.8.0 - UNRELEASED HDFS-8133. Improve readability of deleted block check (Daryn Sharp via Colin P. McCabe) + HDFS-8185. Separate client related routines in HAUtil into a new class. + (wheat9) + OPTIMIZATIONS HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 6a73a9345af79..6721412c48a52 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -806,10 +806,10 @@ public void cancel(Token token, Configuration conf) throws IOException { private static ClientProtocol getNNProxy( Token token, Configuration conf) throws IOException { - URI uri = HAUtil.getServiceUriFromToken(HdfsConstants.HDFS_URI_SCHEME, - token); - if (HAUtil.isTokenForLogicalUri(token) && - !HAUtil.isLogicalUri(conf, uri)) { + URI uri = HAUtilClient.getServiceUriFromToken( + HdfsConstants.HDFS_URI_SCHEME, token); + if (HAUtilClient.isTokenForLogicalUri(token) && + !HAUtilClient.isLogicalUri(conf, uri)) { // If the token is for a logical nameservice, but the configuration // we have disagrees about that, we can't actually renew it. 
// This can be the case in MR, for example, if the RM doesn't diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 9cb00a0c63511..9a754cd164d5d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -102,8 +102,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { "dfs.namenode.path.based.cache.block.map.allocation.percent"; public static final float DFS_NAMENODE_PATH_BASED_CACHE_BLOCK_MAP_ALLOCATION_PERCENT_DEFAULT = 0.25f; - public static final int DFS_NAMENODE_HTTP_PORT_DEFAULT = 50070; - public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address"; + public static final int DFS_NAMENODE_HTTP_PORT_DEFAULT = + HdfsClientConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT; + public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY = + HdfsClientConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY; public static final String DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTP_PORT_DEFAULT; public static final String DFS_NAMENODE_HTTP_BIND_HOST_KEY = "dfs.namenode.http-bind-host"; public static final String DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address"; @@ -302,8 +304,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys { //Following keys have no defaults public static final String DFS_DATANODE_DATA_DIR_KEY = "dfs.datanode.data.dir"; - public static final int DFS_NAMENODE_HTTPS_PORT_DEFAULT = 50470; - public static final String DFS_NAMENODE_HTTPS_ADDRESS_KEY = "dfs.namenode.https-address"; + public static final int DFS_NAMENODE_HTTPS_PORT_DEFAULT = + HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT; + public static final String DFS_NAMENODE_HTTPS_ADDRESS_KEY = + HdfsClientConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY; public static final String DFS_NAMENODE_HTTPS_BIND_HOST_KEY = "dfs.namenode.https-bind-host"; public static final String DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTPS_PORT_DEFAULT; public static final String DFS_NAMENODE_NAME_DIR_KEY = "dfs.namenode.name.dir"; @@ -485,7 +489,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold"; public static final int DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10; public static final String DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY = "dfs.namenode.legacy-oiv-image.dir"; - + public static final String DFS_NAMESERVICES = "dfs.nameservices"; public static final String DFS_NAMESERVICE_ID = "dfs.nameservice.id"; public static final String DFS_INTERNAL_NAMESERVICES_KEY = "dfs.internal.nameservices"; @@ -513,7 +517,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final boolean DFS_QUOTA_BY_STORAGETYPE_ENABLED_DEFAULT = true; // HA related configuration - public static final String DFS_HA_NAMENODES_KEY_PREFIX = "dfs.ha.namenodes"; + public static final String DFS_HA_NAMENODES_KEY_PREFIX = + HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX; public static final String DFS_HA_NAMENODE_ID_KEY = "dfs.ha.namenode.id"; public static final String DFS_HA_STANDBY_CHECKPOINTS_KEY = "dfs.ha.standby.checkpoints"; public static final boolean DFS_HA_STANDBY_CHECKPOINTS_DEFAULT = true; diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index a2c16b713ec2e..60a496fa48c90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -21,7 +21,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT; @@ -31,7 +30,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY; @@ -48,7 +46,6 @@ import java.text.SimpleDateFormat; import java.util.Arrays; import java.util.Collection; -import java.util.Collections; import java.util.Comparator; import java.util.Date; import java.util.HashSet; @@ -87,8 +84,6 @@ import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.NameNode; -import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; -import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpServer2; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -106,7 +101,6 @@ import com.google.common.base.Joiner; import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import com.google.common.collect.Maps; import com.google.common.primitives.SignedBytes; import com.google.protobuf.BlockingService; @@ -518,7 +512,7 @@ public static BlockLocation[] locatedBlocks2Locations(List blocks) for (int hCnt = 0; hCnt < locations.length; hCnt++) { hosts[hCnt] = locations[hCnt].getHostName(); xferAddrs[hCnt] = locations[hCnt].getXferAddr(); - NodeBase node = new NodeBase(xferAddrs[hCnt], + NodeBase node = new NodeBase(xferAddrs[hCnt], locations[hCnt].getNetworkLocation()); racks[hCnt] = node.toString(); } @@ -537,128 +531,14 @@ public static BlockLocation[] locatedBlocks2Locations(List blocks) return blkLocations; } - /** - * Returns collection of nameservice Ids from the configuration. - * @param conf configuration - * @return collection of nameservice Ids, or null if not specified - */ - public static Collection getNameServiceIds(Configuration conf) { - return conf.getTrimmedStringCollection(DFS_NAMESERVICES); - } - - /** - * @return coll if it is non-null and non-empty. Otherwise, - * returns a list with a single null value. 
- */ - private static Collection emptyAsSingletonNull(Collection coll) { - if (coll == null || coll.isEmpty()) { - return Collections.singletonList(null); - } else { - return coll; - } - } - - /** - * Namenode HighAvailability related configuration. - * Returns collection of namenode Ids from the configuration. One logical id - * for each namenode in the in the HA setup. - * - * @param conf configuration - * @param nsId the nameservice ID to look at, or null for non-federated - * @return collection of namenode Ids - */ - public static Collection getNameNodeIds(Configuration conf, String nsId) { - String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId); - return conf.getTrimmedStringCollection(key); - } - - /** - * Given a list of keys in the order of preference, returns a value - * for the key in the given order from the configuration. - * @param defaultValue default value to return, when key was not found - * @param keySuffix suffix to add to the key, if it is not null - * @param conf Configuration - * @param keys list of keys in the order of preference - * @return value of the key or default if a key was not found in configuration - */ - private static String getConfValue(String defaultValue, String keySuffix, - Configuration conf, String... keys) { - String value = null; - for (String key : keys) { - key = addSuffix(key, keySuffix); - value = conf.get(key); - if (value != null) { - break; - } - } - if (value == null) { - value = defaultValue; - } - return value; - } - - /** Add non empty and non null suffix to a key */ - private static String addSuffix(String key, String suffix) { - if (suffix == null || suffix.isEmpty()) { - return key; - } - assert !suffix.startsWith(".") : - "suffix '" + suffix + "' should not already have '.' prepended."; - return key + "." + suffix; - } - - /** Concatenate list of suffix strings '.' separated */ - private static String concatSuffixes(String... suffixes) { - if (suffixes == null) { - return null; - } - return Joiner.on(".").skipNulls().join(suffixes); - } - /** * Return configuration key of format key.suffix1.suffix2...suffixN */ public static String addKeySuffixes(String key, String... suffixes) { - String keySuffix = concatSuffixes(suffixes); - return addSuffix(key, keySuffix); - } - - /** - * Returns the configured address for all NameNodes in the cluster. - * @param conf configuration - * @param defaultAddress default address to return in case key is not found. - * @param keys Set of keys to look for in the order of preference - * @return a map(nameserviceId to map(namenodeId to InetSocketAddress)) - */ - private static Map> - getAddresses(Configuration conf, String defaultAddress, String... keys) { - Collection nameserviceIds = getNameServiceIds(conf); - return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys); + String keySuffix = DFSUtilClient.concatSuffixes(suffixes); + return DFSUtilClient.addSuffix(key, keySuffix); } - /** - * Returns the configured address for all NameNodes in the cluster. - * @param conf configuration - * @param nsIds - *@param defaultAddress default address to return in case key is not found. - * @param keys Set of keys to look for in the order of preference @return a map(nameserviceId to map(namenodeId to InetSocketAddress)) - */ - private static Map> - getAddressesForNsIds(Configuration conf, Collection nsIds, - String defaultAddress, String... keys) { - // Look for configurations of the form [.][.] - // across all of the configured nameservices and namenodes. 
- Map> ret = Maps.newLinkedHashMap(); - for (String nsId : emptyAsSingletonNull(nsIds)) { - Map isas = - getAddressesForNameserviceId(conf, nsId, defaultAddress, keys); - if (!isas.isEmpty()) { - ret.put(nsId, isas); - } - } - return ret; - } - /** * Get all of the RPC addresses of the individual NNs in a given nameservice. * @@ -669,30 +549,8 @@ public static String addKeySuffixes(String key, String... suffixes) { */ public static Map getRpcAddressesForNameserviceId( Configuration conf, String nsId, String defaultValue) { - return getAddressesForNameserviceId(conf, nsId, defaultValue, - DFS_NAMENODE_RPC_ADDRESS_KEY); - } - - private static Map getAddressesForNameserviceId( - Configuration conf, String nsId, String defaultValue, - String... keys) { - Collection nnIds = getNameNodeIds(conf, nsId); - Map ret = Maps.newHashMap(); - for (String nnId : emptyAsSingletonNull(nnIds)) { - String suffix = concatSuffixes(nsId, nnId); - String address = getConfValue(defaultValue, suffix, conf, keys); - if (address != null) { - InetSocketAddress isa = NetUtils.createSocketAddr(address); - if (isa.isUnresolved()) { - LOG.warn("Namenode for " + nsId + - " remains unresolved for ID " + nnId + - ". Check your hdfs-site.xml file to " + - "ensure namenodes are configured properly."); - } - ret.put(nnId, isa); - } - } - return ret; + return DFSUtilClient.getAddressesForNameserviceId(conf, nsId, defaultValue, + DFS_NAMENODE_RPC_ADDRESS_KEY); } /** @@ -700,9 +558,9 @@ private static Map getAddressesForNameserviceId( */ public static Set getAllNnPrincipals(Configuration conf) throws IOException { Set principals = new HashSet(); - for (String nsId : DFSUtil.getNameServiceIds(conf)) { + for (String nsId : DFSUtilClient.getNameServiceIds(conf)) { if (HAUtil.isHAEnabled(conf, nsId)) { - for (String nnId : DFSUtil.getNameNodeIds(conf, nsId)) { + for (String nnId : DFSUtilClient.getNameNodeIds(conf, nsId)) { Configuration confForNn = new Configuration(conf); NameNode.initializeGenericKeys(confForNn, nsId, nnId); String principal = SecurityUtil.getServerPrincipal(confForNn @@ -732,26 +590,8 @@ public static Set getAllNnPrincipals(Configuration conf) throws IOExcept */ public static Map> getHaNnRpcAddresses( Configuration conf) { - return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY); - } - - /** - * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses from - * the configuration. 
- * - * @return list of InetSocketAddresses - */ - public static Map> getHaNnWebHdfsAddresses( - Configuration conf, String scheme) { - if (WebHdfsFileSystem.SCHEME.equals(scheme)) { - return getAddresses(conf, null, - DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); - } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) { - return getAddresses(conf, null, - DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY); - } else { - throw new IllegalArgumentException("Unsupported scheme: " + scheme); - } + return DFSUtilClient.getAddresses(conf, null, + DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY); } /** @@ -764,8 +604,8 @@ public static Map> getHaNnWebHdfsAddresse */ public static Map> getBackupNodeAddresses( Configuration conf) throws IOException { - Map> addressList = getAddresses(conf, - null, DFS_NAMENODE_BACKUP_ADDRESS_KEY); + Map> addressList = DFSUtilClient.getAddresses( + conf, null, DFS_NAMENODE_BACKUP_ADDRESS_KEY); if (addressList.isEmpty()) { throw new IOException("Incorrect configuration: backup node address " + DFS_NAMENODE_BACKUP_ADDRESS_KEY + " is not configured."); @@ -783,8 +623,8 @@ public static Map> getBackupNodeAddresses */ public static Map> getSecondaryNameNodeAddresses( Configuration conf) throws IOException { - Map> addressList = getAddresses(conf, null, - DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY); + Map> addressList = DFSUtilClient.getAddresses( + conf, null, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY); if (addressList.isEmpty()) { throw new IOException("Incorrect configuration: secondary namenode address " + DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + " is not configured."); @@ -815,8 +655,9 @@ public static Map> getNNServiceRpcAddress } Map> addressList = - getAddresses(conf, defaultAddress, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + DFSUtilClient.getAddresses(conf, defaultAddress, + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, + DFS_NAMENODE_RPC_ADDRESS_KEY); if (addressList.isEmpty()) { throw new IOException("Incorrect configuration: namenode address " + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or " @@ -868,8 +709,10 @@ public static Map> getNNServiceRpcAddress } Map> addressList = - getAddressesForNsIds(conf, parentNameServices, defaultAddress, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + DFSUtilClient.getAddressesForNsIds(conf, parentNameServices, + defaultAddress, + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, + DFS_NAMENODE_RPC_ADDRESS_KEY); if (addressList.isEmpty()) { throw new IOException("Incorrect configuration: namenode address " + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or " @@ -1001,7 +844,7 @@ public static Collection getNameServiceUris(Configuration conf, // keep track of non-preferred keys here. Set nonPreferredUris = new HashSet(); - for (String nsId : getNameServiceIds(conf)) { + for (String nsId : DFSUtilClient.getNameServiceIds(conf)) { if (HAUtil.isHAEnabled(conf, nsId)) { // Add the logical URI of the nameservice. try { @@ -1013,7 +856,7 @@ public static Collection getNameServiceUris(Configuration conf, // Add the URI corresponding to the address of the NN. 
boolean uriFound = false; for (String key : keys) { - String addr = conf.get(concatSuffixes(key, nsId)); + String addr = conf.get(DFSUtilClient.concatSuffixes(key, nsId)); if (addr != null) { URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME, NetUtils.createSocketAddr(addr)); @@ -1311,7 +1154,7 @@ private static String getNameServiceId(Configuration conf, String addressKey) { if (nameserviceId != null) { return nameserviceId; } - Collection nsIds = getNameServiceIds(conf); + Collection nsIds = DFSUtilClient.getNameServiceIds(conf); if (1 == nsIds.size()) { return nsIds.toArray(new String[1])[0]; } @@ -1342,14 +1185,14 @@ static String[] getSuffixIDs(final Configuration conf, final String addressKey, String namenodeId = null; int found = 0; - Collection nsIds = getNameServiceIds(conf); - for (String nsId : emptyAsSingletonNull(nsIds)) { + Collection nsIds = DFSUtilClient.getNameServiceIds(conf); + for (String nsId : DFSUtilClient.emptyAsSingletonNull(nsIds)) { if (knownNsId != null && !knownNsId.equals(nsId)) { continue; } - Collection nnIds = getNameNodeIds(conf, nsId); - for (String nnId : emptyAsSingletonNull(nnIds)) { + Collection nnIds = DFSUtilClient.getNameNodeIds(conf, nsId); + for (String nnId : DFSUtilClient.emptyAsSingletonNull(nnIds)) { if (LOG.isTraceEnabled()) { LOG.trace(String.format("addressKey: %s nsId: %s nnId: %s", addressKey, nsId, nnId)); @@ -1453,10 +1296,10 @@ public static String getNamenodeServiceAddr(final Configuration conf, nsId = getOnlyNameServiceIdOrNull(conf); } - String serviceAddrKey = concatSuffixes( + String serviceAddrKey = DFSUtilClient.concatSuffixes( DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId); - String addrKey = concatSuffixes( + String addrKey = DFSUtilClient.concatSuffixes( DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId); String serviceRpcAddr = conf.get(serviceAddrKey); @@ -1471,7 +1314,7 @@ public static String getNamenodeServiceAddr(final Configuration conf, * name of that nameservice. If it refers to 0 or more than 1, return null. 
*/ public static String getOnlyNameServiceIdOrNull(Configuration conf) { - Collection nsIds = getNameServiceIds(conf); + Collection nsIds = DFSUtilClient.getNameServiceIds(conf); if (1 == nsIds.size()) { return nsIds.toArray(new String[1])[0]; } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java index 8e7daf3a97069..8d6a8fedeb400 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java @@ -1534,7 +1534,7 @@ public String getCanonicalServiceName() { @Override protected URI canonicalizeUri(URI uri) { - if (HAUtil.isLogicalUri(getConf(), uri)) { + if (HAUtilClient.isLogicalUri(getConf(), uri)) { // Don't try to DNS-resolve logical URIs, since the 'authority' // portion isn't a proper hostname return uri; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java index 240256c579f33..c967c6912ed28 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java @@ -20,7 +20,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY; -import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX; import java.io.IOException; import java.net.InetSocketAddress; @@ -38,7 +37,6 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.NameNodeProxies.ProxyAndInfo; -import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; @@ -151,7 +149,7 @@ public static String getNameNodeIdOfOtherNode(Configuration conf, String nsId) { "machine is one of the machines listed as a NN RPC address, " + "or configure " + DFSConfigKeys.DFS_NAMESERVICE_ID); - Collection nnIds = DFSUtil.getNameNodeIds(conf, nsId); + Collection nnIds = DFSUtilClient.getNameNodeIds(conf, nsId); String myNNId = conf.get(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY); Preconditions.checkArgument(nnIds != null, "Could not determine namenode ids in namespace '%s'. " + @@ -205,32 +203,6 @@ public static boolean shouldAllowStandbyReads(Configuration conf) { public static void setAllowStandbyReads(Configuration conf, boolean val) { conf.setBoolean("dfs.ha.allow.stale.reads", val); } - - /** - * @return true if the given nameNodeUri appears to be a logical URI. - */ - public static boolean isLogicalUri( - Configuration conf, URI nameNodeUri) { - String host = nameNodeUri.getHost(); - // A logical name must be one of the service IDs. - return DFSUtil.getNameServiceIds(conf).contains(host); - } - - /** - * Check whether the client has a failover proxy provider configured - * for the namenode/nameservice. - * - * @param conf Configuration - * @param nameNodeUri The URI of namenode - * @return true if failover is configured. 
- */ - public static boolean isClientFailoverConfigured( - Configuration conf, URI nameNodeUri) { - String host = nameNodeUri.getHost(); - String configKey = HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX - + "." + host; - return conf.get(configKey) != null; - } /** * Check whether logical URI is needed for the namenode and @@ -256,43 +228,6 @@ public static boolean useLogicalUri(Configuration conf, URI nameNodeUri) return provider.useLogicalURI(); } - /** - * Parse the file system URI out of the provided token. - */ - public static URI getServiceUriFromToken(final String scheme, Token token) { - String tokStr = token.getService().toString(); - final String prefix = buildTokenServicePrefixForLogicalUri(scheme); - if (tokStr.startsWith(prefix)) { - tokStr = tokStr.replaceFirst(prefix, ""); - } - return URI.create(scheme + "://" + tokStr); - } - - /** - * Get the service name used in the delegation token for the given logical - * HA service. - * @param uri the logical URI of the cluster - * @param scheme the scheme of the corresponding FileSystem - * @return the service name - */ - public static Text buildTokenServiceForLogicalUri(final URI uri, - final String scheme) { - return new Text(buildTokenServicePrefixForLogicalUri(scheme) - + uri.getHost()); - } - - /** - * @return true if this token corresponds to a logical nameservice - * rather than a specific namenode. - */ - public static boolean isTokenForLogicalUri(Token token) { - return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX); - } - - public static String buildTokenServicePrefixForLogicalUri(String scheme) { - return HA_DT_SERVICE_PREFIX + scheme + ":"; - } - /** * Locate a delegation token associated with the given HA cluster URI, and if * one is found, clone it to also represent the underlying namenode address. 
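The token-related methods removed from HAUtil here now live in HAUtilClient (the new file earlier in this patch). For the naming scheme they implement, the service field of a delegation token for a logical HA URI is the `ha-` prefix plus the scheme and the nameservice name. A small sketch, assuming the patched HAUtilClient is available; the `mycluster` nameservice is illustrative:

```java
import java.net.URI;
import org.apache.hadoop.hdfs.HAUtilClient;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;

public class TokenServiceNamingSketch {
  public static void main(String[] args) {
    URI logicalUri = URI.create("hdfs://mycluster");

    // "ha-" + scheme + ":" + host, e.g. "ha-hdfs:mycluster".
    Text service = HAUtilClient.buildTokenServiceForLogicalUri(logicalUri, "hdfs");
    System.out.println(service);

    // Round-trip: a token carrying that service is recognized as logical and
    // can be mapped back to the filesystem URI.
    Token<TokenIdentifier> token = new Token<>();
    token.setService(service);
    System.out.println(HAUtilClient.isTokenForLogicalUri(token));           // true
    System.out.println(HAUtilClient.getServiceUriFromToken("hdfs", token)); // hdfs://mycluster
  }
}
```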
@@ -305,8 +240,8 @@ public static void cloneDelegationTokenForLogicalUri( UserGroupInformation ugi, URI haUri, Collection nnAddrs) { // this cloning logic is only used by hdfs - Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri, - HdfsConstants.HDFS_URI_SCHEME); + Text haService = HAUtilClient.buildTokenServiceForLogicalUri(haUri, + HdfsConstants.HDFS_URI_SCHEME); Token haToken = tokenSelector.selectToken(haService, ugi.getTokens()); if (haToken != null) { @@ -318,7 +253,8 @@ public static void cloneDelegationTokenForLogicalUri( new Token.PrivateToken(haToken); SecurityUtil.setTokenService(specificToken, singleNNAddr); Text alias = new Text( - buildTokenServicePrefixForLogicalUri(HdfsConstants.HDFS_URI_SCHEME) + HAUtilClient.buildTokenServicePrefixForLogicalUri( + HdfsConstants.HDFS_URI_SCHEME) + "//" + specificToken.getService()); ugi.addToken(alias, specificToken); if (LOG.isDebugEnabled()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java index ee3290bd79e18..0da7a4d3cd6d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java @@ -179,8 +179,8 @@ public static ProxyAndInfo createProxy(Configuration conf, Text dtService; if (failoverProxyProvider.useLogicalURI()) { - dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri, - HdfsConstants.HDFS_URI_SCHEME); + dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri, + HdfsConstants.HDFS_URI_SCHEME); } else { dtService = SecurityUtil.buildTokenService( NameNode.getAddress(nameNodeUri)); @@ -245,8 +245,8 @@ public static ProxyAndInfo createProxyWithLossyRetryHandler( new Class[] { xface }, dummyHandler); Text dtService; if (failoverProxyProvider.useLogicalURI()) { - dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri, - HdfsConstants.HDFS_URI_SCHEME); + dtService = HAUtilClient.buildTokenServiceForLogicalUri(nameNodeUri, + HdfsConstants.HDFS_URI_SCHEME); } else { dtService = SecurityUtil.buildTokenService( NameNode.getAddress(nameNodeUri)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java index 7cf8a4721d651..52259b04fa286 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java @@ -120,13 +120,6 @@ public static enum DatanodeReportType { */ public static final String HDFS_URI_SCHEME = "hdfs"; - /** - * A prefix put before the namenode URI inside the "service" field - * of a delgation token, indicating that the URI is a logical (HA) - * URI. - */ - public static final String HA_DT_SERVICE_PREFIX = "ha-"; - /** * Current layout version for NameNode. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java index 0ebf3dcc5224c..c6f8489ec567e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/ParameterParser.java @@ -21,7 +21,7 @@ import org.apache.commons.io.Charsets; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.permission.FsPermission; -import org.apache.hadoop.hdfs.HAUtil; +import org.apache.hadoop.hdfs.HAUtilClient; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.web.resources.BlockSizeParam; import org.apache.hadoop.hdfs.web.resources.BufferSizeParam; @@ -112,10 +112,10 @@ Token delegationToken() throws IOException { Token(); token.decodeFromUrlString(delegation); URI nnUri = URI.create(HDFS_URI_SCHEME + "://" + namenodeId()); - boolean isLogical = HAUtil.isLogicalUri(conf, nnUri); + boolean isLogical = HAUtilClient.isLogicalUri(conf, nnUri); if (isLogical) { - token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri, - HDFS_URI_SCHEME)); + token.setService( + HAUtilClient.buildTokenServiceForLogicalUri(nnUri, HDFS_URI_SCHEME)); } else { token.setService(SecurityUtil.buildTokenService(nnUri)); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index 4575d64724778..1e94923b9473f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ha.ServiceFailedException; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.HAUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.protocol.ClientProtocol; @@ -73,7 +74,6 @@ import org.apache.hadoop.util.JvmPauseMonitor; import org.apache.hadoop.util.ServicePlugin; import org.apache.hadoop.util.StringUtils; -import org.apache.log4j.LogManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -404,7 +404,7 @@ public void setClientNamenodeAddress(Configuration conf) { return; } - if (DFSUtil.getNameServiceIds(conf).contains(nnHost)) { + if (DFSUtilClient.getNameServiceIds(conf).contains(nnHost)) { // host name is logical clientNamenodeAddress = nnHost; } else if (nnUri.getPort() > 0) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java index b8dcbbf056c42..d608281aa3f2a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java @@ -51,6 +51,7 @@ import org.apache.hadoop.fs.shell.Command; import org.apache.hadoop.fs.shell.CommandFormat; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdfs.HAUtilClient; import 
org.apache.hadoop.hdfs.client.BlockReportOptions; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; @@ -71,7 +72,6 @@ import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; -import org.apache.hadoop.ipc.GenericRefreshProtocol; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RefreshCallQueueProtocol; @@ -574,7 +574,7 @@ public void setSafeMode(String[] argv, int idx) throws IOException { DistributedFileSystem dfs = getDFS(); Configuration dfsConf = dfs.getConf(); URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri); + boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri); if (isHaEnabled) { String nsId = dfsUri.getHost(); @@ -719,7 +719,7 @@ public int saveNamespace(String[] argv) throws IOException { } URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri); + boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri); if (isHaEnabled) { String nsId = dfsUri.getHost(); List> proxies = @@ -770,7 +770,7 @@ public int restoreFailedStorage(String arg) throws IOException { DistributedFileSystem dfs = getDFS(); Configuration dfsConf = dfs.getConf(); URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri); + boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri); if (isHaEnabled) { String nsId = dfsUri.getHost(); @@ -803,7 +803,7 @@ public int refreshNodes() throws IOException { DistributedFileSystem dfs = getDFS(); Configuration dfsConf = dfs.getConf(); URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri); + boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri); if (isHaEnabled) { String nsId = dfsUri.getHost(); @@ -854,7 +854,7 @@ public int setBalancerBandwidth(String[] argv, int idx) throws IOException { DistributedFileSystem dfs = (DistributedFileSystem) fs; Configuration dfsConf = dfs.getConf(); URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri); + boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri); if (isHaEnabled) { String nsId = dfsUri.getHost(); @@ -1148,7 +1148,7 @@ public int finalizeUpgrade() throws IOException { Configuration dfsConf = dfs.getConf(); URI dfsUri = dfs.getUri(); - boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri); + boolean isHaAndLogicalUri = HAUtilClient.isLogicalUri(dfsConf, dfsUri); if (isHaAndLogicalUri) { // In the case of HA and logical URI, run finalizeUpgrade for all // NNs in this nameservice. 
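Each of these DFSAdmin commands follows the same pattern: detect whether the filesystem URI is a logical HA URI via the relocated `HAUtilClient.isLogicalUri`, and if so treat the URI host as the nameservice ID and fan out the operation to every namenode. A condensed sketch of that check, with a made-up nameservice name:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HAUtilClient;

public class HaAwareAdminSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    // "mycluster" is declared as a nameservice, so hdfs://mycluster is logical.
    conf.set("dfs.nameservices", "mycluster");

    URI dfsUri = URI.create("hdfs://mycluster");
    if (HAUtilClient.isLogicalUri(conf, dfsUri)) {
      // HA case: the host is the nameservice ID; an admin command would now
      // build a proxy per namenode and invoke the operation on each of them.
      String nsId = dfsUri.getHost();
      System.out.println("HA nameservice: " + nsId);
    } else {
      // Non-HA case: talk to the single namenode behind the URI authority.
      System.out.println("Single namenode: " + dfsUri.getAuthority());
    }
  }
}
```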
@@ -1188,7 +1188,7 @@ public int metaSave(String[] argv, int idx) throws IOException { DistributedFileSystem dfs = getDFS(); Configuration dfsConf = dfs.getConf(); URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri); + boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri); if (isHaEnabled) { String nsId = dfsUri.getHost(); @@ -1275,7 +1275,7 @@ public int refreshServiceAcl() throws IOException { DistributedFileSystem dfs = getDFS(); URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri); + boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri); if (isHaEnabled) { // Run refreshServiceAcl for all NNs if HA is enabled @@ -1318,7 +1318,7 @@ public int refreshUserToGroupsMappings() throws IOException { DistributedFileSystem dfs = getDFS(); URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri); + boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri); if (isHaEnabled) { // Run refreshUserToGroupsMapings for all NNs if HA is enabled @@ -1363,7 +1363,7 @@ public int refreshSuperUserGroupsConfiguration() throws IOException { DistributedFileSystem dfs = getDFS(); URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri); + boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri); if (isHaEnabled) { // Run refreshSuperUserGroupsConfiguration for all NNs if HA is enabled @@ -1402,7 +1402,7 @@ public int refreshCallQueue() throws IOException { DistributedFileSystem dfs = getDFS(); URI dfsUri = dfs.getUri(); - boolean isHaEnabled = HAUtil.isLogicalUri(conf, dfsUri); + boolean isHaEnabled = HAUtilClient.isLogicalUri(conf, dfsUri); if (isHaEnabled) { // Run refreshCallQueue for all NNs if HA is enabled diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java index e9c611d8aa389..aa69dca699e4c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSHAAdmin.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ha.HAServiceTarget; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DFSUtilClient; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.util.ToolRunner; @@ -125,7 +126,9 @@ protected int runCmd(String[] argv) throws Exception { */ @Override protected Collection getTargetIds(String namenodeToActivate) { - return DFSUtil.getNameNodeIds(getConf(), (nameserviceId != null)? nameserviceId : DFSUtil.getNamenodeNameServiceId(getConf())); + return DFSUtilClient.getNameNodeIds(getConf(), + (nameserviceId != null) ? 
nameserviceId : DFSUtil.getNamenodeNameServiceId( + getConf())); } public static void main(String[] argv) throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java index 4b35a757f68a5..6376243727806 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java @@ -38,8 +38,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; -import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem; -import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -139,8 +138,8 @@ private static FileSystem getFileSystem(Configuration conf, String url) // For backward compatibility URI fsUri = URI.create( - url.replaceFirst("^http://", WebHdfsFileSystem.SCHEME + "://") - .replaceFirst("^https://", SWebHdfsFileSystem.SCHEME + "://")); + url.replaceFirst("^http://", WebHdfsConstants.WEBHDFS_SCHEME + "://") + .replaceFirst("^https://", WebHdfsConstants.SWEBHDFS_SCHEME + "://")); return FileSystem.get(fsUri, conf); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java index ffca099d2dd0c..73c10d6628387 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/SWebHdfsFileSystem.java @@ -22,11 +22,9 @@ public class SWebHdfsFileSystem extends WebHdfsFileSystem { - public static final String SCHEME = "swebhdfs"; - @Override public String getScheme() { - return SCHEME; + return WebHdfsConstants.SWEBHDFS_SCHEME; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java index e5e755ad4c64e..bc3eb4bd98c9d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/TokenAspect.java @@ -28,7 +28,7 @@ import org.apache.hadoop.fs.DelegationTokenRenewer; import org.apache.hadoop.fs.DelegationTokenRenewer.Renewable; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hdfs.HAUtil; +import org.apache.hadoop.hdfs.HAUtilClient; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; @@ -74,8 +74,8 @@ private TokenManagementDelegator getInstance(Token token, throws IOException { final URI uri; final String scheme = getSchemeByKind(token.getKind()); - if (HAUtil.isTokenForLogicalUri(token)) { - uri = HAUtil.getServiceUriFromToken(scheme, token); + if (HAUtilClient.isTokenForLogicalUri(token)) { + uri = HAUtilClient.getServiceUriFromToken(scheme, token); } else { final InetSocketAddress address = 
SecurityUtil.getTokenServiceAddr (token); @@ -86,9 +86,9 @@ private TokenManagementDelegator getInstance(Token token, private static String getSchemeByKind(Text kind) { if (kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)) { - return WebHdfsFileSystem.SCHEME; + return WebHdfsConstants.WEBHDFS_SCHEME; } else if (kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND)) { - return SWebHdfsFileSystem.SCHEME; + return WebHdfsConstants.SWEBHDFS_SCHEME; } else { throw new IllegalArgumentException("Unsupported scheme"); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 6f944c7534bd9..acdd8e14eaed2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -58,7 +58,8 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdfs.HAUtil; +import org.apache.hadoop.hdfs.DFSUtilClient; +import org.apache.hadoop.hdfs.HAUtilClient; import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; @@ -91,12 +92,10 @@ public class WebHdfsFileSystem extends FileSystem implements DelegationTokenRenewer.Renewable, TokenAspect.TokenManagementDelegator { public static final Log LOG = LogFactory.getLog(WebHdfsFileSystem.class); - /** File System URI: {SCHEME}://namenode:port/path/to/file */ - public static final String SCHEME = "webhdfs"; /** WebHdfs version. */ public static final int VERSION = 1; /** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */ - public static final String PATH_PREFIX = "/" + SCHEME + "/v" + VERSION; + public static final String PATH_PREFIX = "/" + WebHdfsConstants.WEBHDFS_SCHEME + "/v" + VERSION; /** Default connection factory may be overridden in tests to use smaller timeout values */ protected URLConnectionFactory connectionFactory; @@ -125,7 +124,7 @@ public class WebHdfsFileSystem extends FileSystem */ @Override public String getScheme() { - return SCHEME; + return WebHdfsConstants.WEBHDFS_SCHEME; } /** @@ -157,13 +156,13 @@ public synchronized void initialize(URI uri, Configuration conf this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority()); this.nnAddrs = resolveNNAddr(); - boolean isHA = HAUtil.isClientFailoverConfigured(conf, this.uri); - boolean isLogicalUri = isHA && HAUtil.isLogicalUri(conf, this.uri); + boolean isHA = HAUtilClient.isClientFailoverConfigured(conf, this.uri); + boolean isLogicalUri = isHA && HAUtilClient.isLogicalUri(conf, this.uri); // In non-HA or non-logical URI case, the code needs to call // getCanonicalUri() in order to handle the case where no port is // specified in the URI this.tokenServiceName = isLogicalUri ? - HAUtil.buildTokenServiceForLogicalUri(uri, getScheme()) + HAUtilClient.buildTokenServiceForLogicalUri(uri, getScheme()) : SecurityUtil.buildTokenService(getCanonicalUri()); if (!isHA) { @@ -888,7 +887,6 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException { /** * Create a symlink pointing to the destination path. 
- * @see org.apache.hadoop.fs.Hdfs#createSymlink(Path, Path, boolean) */ public void createSymlink(Path destination, Path f, boolean createParent ) throws IOException { @@ -1432,13 +1430,13 @@ private InetSocketAddress[] resolveNNAddr() throws IOException { ArrayList ret = new ArrayList(); - if (!HAUtil.isLogicalUri(conf, uri)) { + if (!HAUtilClient.isLogicalUri(conf, uri)) { InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort()); ret.add(addr); } else { - Map> addresses = DFSUtil + Map> addresses = DFSUtilClient .getHaNnWebHdfsAddresses(conf, scheme); // Extract the entry corresponding to the logical name. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java index 2ff7050515706..c98ba66f38361 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfs.java @@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.ipc.RemoteException; @@ -88,7 +89,7 @@ public static void beforeClassSetup() throws Exception { conf.set(FsPermission.UMASK_LABEL, "000"); conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 0); cluster = new MiniDFSCluster.Builder(conf).build(); - webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME); + webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME); dfs = cluster.getFileSystem(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java index 5ef2f734aa857..681e9bd53b183 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java @@ -232,7 +232,7 @@ public static void addHAConfiguration(Configuration conf, } else { // append the nsid conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsIds + "," + logicalName); } - conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, + conf.set(DFSUtil.addKeySuffixes(HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, logicalName), "nn1,nn2"); conf.set(HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." 
+ logicalName, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java index d16ce1cc6331e..68cc1552aa9e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java @@ -75,7 +75,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; -import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.IOUtils; @@ -890,7 +890,7 @@ public static void namenodeRestartTest(final Configuration conf, cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); final FileSystem fs = isWebHDFS ? WebHdfsTestUtil.getWebHdfsFileSystem( - conf, WebHdfsFileSystem.SCHEME) : dfs; + conf, WebHdfsConstants.WEBHDFS_SCHEME) : dfs; final URI uri = dfs.getUri(); assertTrue(HdfsUtils.isHealthy(uri)); @@ -1094,7 +1094,7 @@ private static FileSystem createFsWithDifferentUsername( final UserGroupInformation ugi = UserGroupInformation.createUserForTesting( username, new String[]{"supergroup"}); - return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME) + return isWebHDFS? WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsConstants.WEBHDFS_SCHEME) : DFSTestUtil.getFileSystemAs(ugi, conf); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index ed913f19dee6d..7f31f33958cf6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -216,13 +216,13 @@ public void testGetNameServiceIdException() { } /** - * Test {@link DFSUtil#getNameServiceIds(Configuration)} + * Test {@link DFSUtilClient#getNameServiceIds(Configuration)} */ @Test public void testGetNameServiceIds() { HdfsConfiguration conf = new HdfsConfiguration(); conf.set(DFS_NAMESERVICES, "nn1,nn2"); - Collection nameserviceIds = DFSUtil.getNameServiceIds(conf); + Collection nameserviceIds = DFSUtilClient.getNameServiceIds(conf); Iterator it = nameserviceIds.iterator(); assertEquals(2, nameserviceIds.size()); assertEquals("nn1", it.next().toString()); @@ -587,7 +587,7 @@ public void testGetHaNnHttpAddresses() throws IOException { Configuration conf = createWebHDFSHAConfiguration(LOGICAL_HOST_NAME, NS1_NN1_ADDR, NS1_NN2_ADDR); Map> map = - DFSUtil.getHaNnWebHdfsAddresses(conf, "webhdfs"); + DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs"); assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString()); assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index 9a102065cf0e7..0689a5333c539 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -67,7 +67,7 @@ import org.apache.hadoop.hdfs.net.Peer; import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector; import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil; -import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.test.GenericTestUtils; @@ -533,7 +533,7 @@ public void testFileChecksum() throws Exception { } //webhdfs - final String webhdfsuri = WebHdfsFileSystem.SCHEME + "://" + nnAddr; + final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr; System.out.println("webhdfsuri=" + webhdfsuri); final FileSystem webhdfs = ugi.doAs( new PrivilegedExceptionAction() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java index e43593df2e709..b211ffb436c98 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java @@ -21,10 +21,8 @@ import java.io.File; import java.io.IOException; import java.io.PrintStream; -import java.io.PrintWriter; import java.io.RandomAccessFile; import java.io.StringReader; -import java.io.StringWriter; import java.net.URI; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; @@ -44,7 +42,6 @@ import org.apache.hadoop.crypto.CryptoProtocolVersion; import org.apache.hadoop.crypto.key.JavaKeyStoreProvider; import org.apache.hadoop.crypto.key.KeyProvider; -import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderFactory; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CreateFlag; @@ -72,7 +69,7 @@ import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck; import org.apache.hadoop.hdfs.tools.DFSck; import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter; -import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.security.AccessControlException; @@ -612,7 +609,7 @@ public void testReadWriteUsingWebHdfs() throws Exception { final HdfsAdmin dfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf); final FileSystem webHdfsFs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); final Path zone = new Path("/zone"); fs.mkdirs(zone); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java index 163378c691649..4541e6950c0e1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestQuota.java @@ -35,7 +35,7 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.hdfs.tools.DFSAdmin; -import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; +import 
org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.UserGroupInformation; import org.junit.Assert; @@ -858,7 +858,7 @@ public void testBlockAllocationAdjustsUsageConservatively() DFSAdmin admin = new DFSAdmin(conf); final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); - final String webhdfsuri = WebHdfsFileSystem.SCHEME + "://" + nnAddr; + final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr; System.out.println("webhdfsuri=" + webhdfsuri); final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf); @@ -922,7 +922,7 @@ public void testMultipleFilesSmallerThanOneBlock() throws Exception { DFSAdmin admin = new DFSAdmin(conf); final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); - final String webhdfsuri = WebHdfsFileSystem.SCHEME + "://" + nnAddr; + final String webhdfsuri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr; System.out.println("webhdfsuri=" + webhdfsuri); final FileSystem webhdfs = new Path(webhdfsuri).getFileSystem(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java index 98e4b1e91bd25..403b21e0c21b1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/sasl/SaslDataTransferTestCase.java @@ -39,7 +39,6 @@ import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.minikdc.MiniKdc; import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.junit.AfterClass; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java index dff8fa4ba9a1e..13d34d9f72138 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods; +import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.AccessControlException; @@ -169,7 +170,7 @@ public void testAddDelegationTokensDFSApi() throws Exception { @Test public void testDelegationTokenWebHdfsApi() throws Exception { ((Log4JLogger)NamenodeWebHdfsMethods.LOG).getLogger().setLevel(Level.ALL); - final String uri = WebHdfsFileSystem.SCHEME + "://" + final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); //get file system as JobTracker final UserGroupInformation ugi = UserGroupInformation.createUserForTesting( diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java index 3bbd6f15a9153..9d0301ca62e61 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; +import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.security.TestDoAsEffectiveUser; @@ -149,7 +150,7 @@ public Token[] run() throws IOException { public void testWebHdfsDoAs() throws Exception { WebHdfsTestUtil.LOG.info("START: testWebHdfsDoAs()"); WebHdfsTestUtil.LOG.info("ugi.getShortUserName()=" + ugi.getShortUserName()); - final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config, WebHdfsFileSystem.SCHEME); + final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, config, WebHdfsConstants.WEBHDFS_SCHEME); final Path root = new Path("/"); cluster.getFileSystem().setPermission(root, new FsPermission((short)0777)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index 32fae45a09a4e..296003ffb46cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -50,7 +50,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.TestBlockStoragePolicy; import org.apache.hadoop.hdfs.protocol.Block; -import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager.StatefulBlockInfo; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState; @@ -63,7 +62,6 @@ import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; import org.apache.hadoop.test.PathUtils; -import org.apache.hadoop.util.Time; import org.apache.log4j.Level; import org.apache.log4j.Logger; import org.apache.log4j.spi.LoggingEvent; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java index 217d6b572d9b6..59fd18f176bc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/web/webhdfs/TestParameterParser.java @@ -19,7 +19,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSTestUtil; -import org.apache.hadoop.hdfs.HAUtil; +import org.apache.hadoop.hdfs.HAUtilClient; import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.hdfs.web.resources.DelegationParam; import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam; @@ -30,11 +30,8 @@ import io.netty.handler.codec.http.QueryStringDecoder; -import javax.servlet.ServletContext; - import java.io.IOException; -import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; public class TestParameterParser { @@ -51,7 +48,7 @@ public void testDeserializeHAToken() throws IOException { + DelegationParam.NAME + "=" + token.encodeToUrlString()); ParameterParser testParser = new ParameterParser(decoder, conf); final Token tok2 = testParser.delegationToken(); - Assert.assertTrue(HAUtil.isTokenForLogicalUri(tok2)); + Assert.assertTrue(HAUtilClient.isTokenForLogicalUri(tok2)); } @Test diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java index 7d062418ccf4f..0699c31cf3a92 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogs.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.web.WebHdfsConstants; import org.apache.hadoop.hdfs.web.WebHdfsTestUtil; import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.security.AccessControlException; @@ -196,7 +197,7 @@ public void testAuditWebHdfs() throws Exception { setupAuditLogs(); - WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME); + WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME); InputStream istream = webfs.open(file); int val = istream.read(); istream.close(); @@ -215,7 +216,7 @@ public void testAuditWebHdfsStat() throws Exception { setupAuditLogs(); - WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME); + WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME); FileStatus st = webfs.getFileStatus(file); verifyAuditLogs(true); @@ -232,7 +233,7 @@ public void testAuditWebHdfsDenied() throws Exception { setupAuditLogs(); try { - WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME); + WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME); InputStream istream = webfs.open(file); int val = istream.read(); fail("open+read must not succeed, got " + val); @@ -252,7 +253,7 @@ public void testAuditWebHdfsOpen() throws Exception { setupAuditLogs(); - WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsFileSystem.SCHEME); + WebHdfsFileSystem webfs = WebHdfsTestUtil.getWebHdfsFileSystemAs(userGroupInfo, conf, WebHdfsConstants.WEBHDFS_SCHEME); webfs.open(file); verifyAuditLogsCheckPattern(true, 3, webOpenPattern); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java index 2515da12f0f3a..b85e5ee447ca4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMalformedURLs.java @@ -19,15 +19,15 @@ package org.apache.hadoop.hdfs.server.namenode; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import static org.junit.Assert.assertNotEquals; + import org.junit.After; import org.junit.Before; import org.junit.Test; -import org.apache.hadoop.hdfs.DFSConfigKeys; - public class TestMalformedURLs { private MiniDFSCluster cluster; Configuration config; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java index 55926cc9d3993..dca777f77fa72 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java @@ -21,6 +21,7 @@ import static org.junit.Assert.assertThat; import static org.hamcrest.core.Is.is; import static org.hamcrest.core.IsNot.not; + import org.junit.Test; import org.apache.hadoop.fs.FileUtil; @@ -194,7 +195,7 @@ public void testHttpBindHostKey() throws IOException { private static void setupSsl() throws Exception { Configuration conf = new Configuration(); conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name()); - conf.set(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); + conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0"); conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0"); File base = new File(BASEDIR); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java index fced3b0296811..2daa9f87181f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java @@ -281,8 +281,8 @@ public void testHAUtilClonesDelegationTokens() throws Exception { UserGroupInformation ugi = UserGroupInformation.createRemoteUser("test"); URI haUri = new URI("hdfs://my-ha-uri/"); - token.setService(HAUtil.buildTokenServiceForLogicalUri(haUri, - HdfsConstants.HDFS_URI_SCHEME)); + token.setService(HAUtilClient.buildTokenServiceForLogicalUri(haUri, + HdfsConstants.HDFS_URI_SCHEME)); ugi.addToken(token); Collection nnAddrs = new HashSet(); @@ -338,8 +338,8 @@ public void testHAUtilClonesDelegationTokens() throws Exception { @Test(timeout = 300000) public void testDFSGetCanonicalServiceName() throws Exception { URI hAUri = HATestUtil.getLogicalUri(cluster); - String haService = HAUtil.buildTokenServiceForLogicalUri(hAUri, - HdfsConstants.HDFS_URI_SCHEME).toString(); + String haService = HAUtilClient.buildTokenServiceForLogicalUri(hAUri, + HdfsConstants.HDFS_URI_SCHEME).toString(); 
assertEquals(haService, dfs.getCanonicalServiceName()); final String renewer = UserGroupInformation.getCurrentUser().getShortUserName(); final Token token = @@ -355,8 +355,8 @@ public void testHdfsGetCanonicalServiceName() throws Exception { Configuration conf = dfs.getConf(); URI haUri = HATestUtil.getLogicalUri(cluster); AbstractFileSystem afs = AbstractFileSystem.createFileSystem(haUri, conf); - String haService = HAUtil.buildTokenServiceForLogicalUri(haUri, - HdfsConstants.HDFS_URI_SCHEME).toString(); + String haService = HAUtilClient.buildTokenServiceForLogicalUri(haUri, + HdfsConstants.HDFS_URI_SCHEME).toString(); assertEquals(haService, afs.getCanonicalServiceName()); Token token = afs.getDelegationTokens( UserGroupInformation.getCurrentUser().getShortUserName()).get(0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java index ec872505d5b40..c4a29883eed48 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAConfiguration.java @@ -60,7 +60,7 @@ public void testCheckpointerValidityChecks() throws Exception { private Configuration getHAConf(String nsId, String host1, String host2) { Configuration conf = new Configuration(); - conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsId); + conf.set(DFSConfigKeys.DFS_NAMESERVICES, nsId); conf.set(DFSUtil.addKeySuffixes( DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, nsId), "nn1,nn2"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java index 33da4d4946ffc..53372ea2cdee7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDFSHAAdmin.java @@ -87,10 +87,10 @@ public class TestDFSHAAdmin { private HdfsConfiguration getHAConf() { HdfsConfiguration conf = new HdfsConfiguration(); - conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID); + conf.set(DFSConfigKeys.DFS_NAMESERVICES, NSID); conf.set(DFSConfigKeys.DFS_NAMESERVICE_ID, NSID); conf.set(DFSUtil.addKeySuffixes( - DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, NSID), "nn1,nn2"); + DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, NSID), "nn1,nn2"); conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1"); conf.set(DFSUtil.addKeySuffixes( DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, NSID, "nn1"), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java index 20b25f47de8f7..ddbc69b947843 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestFSMainOperationsWebHdfs.java @@ -80,7 +80,7 @@ public static void setupCluster() { cluster.getFileSystem().setPermission( new Path("/"), new FsPermission((short)0777)); - final String uri = WebHdfsFileSystem.SCHEME + "://" + final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); 
//get file system as a non-superuser diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java index 3405c686af808..8cee88f3086ea 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestHttpsFileSystem.java @@ -21,7 +21,6 @@ import java.io.InputStream; import java.io.OutputStream; import java.net.InetSocketAddress; -import java.net.URI; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataOutputStream; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java index 2d8892c957f51..6733731e679cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java @@ -43,7 +43,6 @@ import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DistributedFileSystem; -import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.TestDFSClientRetries; import org.apache.hadoop.hdfs.server.namenode.NameNode; @@ -121,7 +120,7 @@ static void largeFileTest(final long fileLength) throws Exception { try { cluster.waitActive(); - final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME); + final FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME); final Path dir = new Path("/test/largeFile"); Assert.assertTrue(fs.mkdirs(dir)); @@ -249,7 +248,7 @@ public void testLargeDirectory() throws Exception { new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); try { cluster.waitActive(); - WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME) + WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME) .setPermission(new Path("/"), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); @@ -264,7 +263,7 @@ public void testLargeDirectory() throws Exception { @Override public Void run() throws IOException, URISyntaxException { FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); Path d = new Path("/my-dir"); Assert.assertTrue(fs.mkdirs(d)); for (int i=0; i < listLimit*3; i++) { @@ -288,7 +287,7 @@ public void testNumericalUserName() throws Exception { new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); try { cluster.waitActive(); - WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME) + WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME) .setPermission(new Path("/"), new FsPermission(FsAction.ALL, FsAction.ALL, FsAction.ALL)); @@ -297,7 +296,7 @@ public void testNumericalUserName() throws Exception { @Override public Void run() throws IOException, URISyntaxException { FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); Path d = new Path("/my-dir"); Assert.assertTrue(fs.mkdirs(d)); return null; @@ -321,7 +320,7 @@ public void testCreateWithNoDN() throws Exception { conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); 
cluster.waitActive(); FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); fs.create(new Path("/testnodatanode")); Assert.fail("No exception was thrown"); } catch (IOException ex) { @@ -345,7 +344,7 @@ public void testWebHdfsCreateSnapshot() throws Exception { cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); final Path foo = new Path("/foo"); dfs.mkdirs(foo); @@ -387,7 +386,7 @@ public void testWebHdfsDeleteSnapshot() throws Exception { cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); final Path foo = new Path("/foo"); dfs.mkdirs(foo); @@ -423,7 +422,7 @@ public void testWebHdfsRenameSnapshot() throws Exception { cluster.waitActive(); final DistributedFileSystem dfs = cluster.getFileSystem(); final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); final Path foo = new Path("/foo"); dfs.mkdirs(foo); @@ -465,7 +464,7 @@ public void testRaceWhileNNStartup() throws Exception { final Path foo = new Path("/foo"); final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); try { webHdfs.mkdirs(foo); fail("Expected RetriableException"); @@ -491,7 +490,7 @@ public void testDTInInsecureClusterWithFallback() try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); Assert.assertNull(webHdfs.getDelegationToken(null)); } finally { if (cluster != null) { @@ -507,7 +506,7 @@ public void testDTInInsecureCluster() throws Exception { try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build(); final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); webHdfs.getDelegationToken(null); fail("No exception is thrown."); } catch (AccessControlException ace) { @@ -532,7 +531,7 @@ public void testWebHdfsOffsetAndLength() throws Exception{ try { cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); final WebHdfsFileSystem fs = - WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME); + WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME); try (OutputStream os = fs.create(new Path(PATH))) { os.write(CONTENTS); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java index 6b44b26e41580..a285cd303c9ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSAcl.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdfs.web; import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest; -import org.apache.hadoop.hdfs.web.WebHdfsFileSystem; import org.apache.hadoop.security.UserGroupInformation; import org.junit.BeforeClass; import org.junit.Ignore; @@ -53,7 +52,7 @@ public void 
testDefaultAclNewSymlinkIntermediate() { */ @Override protected WebHdfsFileSystem createFileSystem() throws Exception { - return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME); + return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME); } /** @@ -67,6 +66,6 @@ protected WebHdfsFileSystem createFileSystem() throws Exception { protected WebHdfsFileSystem createFileSystem(UserGroupInformation user) throws Exception { return WebHdfsTestUtil.getWebHdfsFileSystemAs(user, conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java index da45cbfae60b7..9f78548b9c05d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSForHA.java @@ -55,7 +55,6 @@ import org.apache.hadoop.security.token.Token; import org.junit.Assert; import org.junit.Test; -import org.mockito.Mockito; import org.mockito.internal.util.reflection.Whitebox; import org.mortbay.util.ajax.JSON; @@ -64,7 +63,7 @@ public class TestWebHDFSForHA { private static final String LOGICAL_NAME = "minidfs"; - private static final URI WEBHDFS_URI = URI.create(WebHdfsFileSystem.SCHEME + + private static final URI WEBHDFS_URI = URI.create(WebHdfsConstants.WEBHDFS_SCHEME + "://" + LOGICAL_NAME); private static final MiniDFSNNTopology topo = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf(LOGICAL_NAME).addNN( diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java index 8b829734ef712..12110579bf08b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFSXAttr.java @@ -31,6 +31,6 @@ public class TestWebHDFSXAttr extends FSXAttrBaseTest { */ @Override protected WebHdfsFileSystem createFileSystem() throws Exception { - return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME); + return WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java index b2250fe5793d7..bc10bca667c7e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java @@ -42,7 +42,6 @@ import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.AppendTestUtil; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.web.resources.*; import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam; @@ -78,7 +77,7 @@ protected void setUp() throws Exception { final UserGroupInformation current = UserGroupInformation.getCurrentUser(); ugi = UserGroupInformation.createUserForTesting( 
current.getShortUserName() + "x", new String[]{"user"}); - fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME); + fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, WebHdfsConstants.WEBHDFS_SCHEME); defaultWorkingDirectory = fs.getWorkingDirectory().toUri().getPath(); } @@ -539,7 +538,7 @@ public void testAccess() throws IOException, InterruptedException { UserGroupInformation ugi = UserGroupInformation.createUserForTesting("alpha", new String[]{"beta"}); WebHdfsFileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystemAs(ugi, conf, - WebHdfsFileSystem.SCHEME); + WebHdfsConstants.WEBHDFS_SCHEME); fs.mkdirs(p1); fs.setPermission(p1, new FsPermission((short) 0444)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java index 13a5a534775a1..bd4d693795bfe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java @@ -83,7 +83,7 @@ public void setUp() throws Exception { serverSocket = new ServerSocket(0, CONNECTION_BACKLOG); nnHttpAddress = new InetSocketAddress("localhost", serverSocket.getLocalPort()); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:" + serverSocket.getLocalPort()); - fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME); + fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME); fs.connectionFactory = connectionFactory; clients = new ArrayList(); serverThread = null; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java index 69d1a0456f059..2913a97d309e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java @@ -53,7 +53,7 @@ public class TestWebHdfsUrl { // NOTE: port is never used - final URI uri = URI.create(WebHdfsFileSystem.SCHEME + "://" + "127.0.0.1:0"); + final URI uri = URI.create(WebHdfsConstants.WEBHDFS_SCHEME + "://" + "127.0.0.1:0"); @Before public void resetUGI() { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java index aeda32ccad742..cb9a2ca8779a9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsWithMultipleNameNodes.java @@ -27,7 +27,6 @@ import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; @@ -80,7 +79,7 @@ private static void setupCluster(final int nNameNodes, final int nDataNodes) webhdfs = new WebHdfsFileSystem[nNameNodes]; for(int i = 0; i < webhdfs.length; i++) { final InetSocketAddress addr = cluster.getNameNode(i).getHttpAddress(); - final String uri = 
WebHdfsFileSystem.SCHEME + "://" + final String uri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + addr.getHostName() + ":" + addr.getPort() + "/"; webhdfs[i] = (WebHdfsFileSystem)FileSystem.get(new URI(uri), conf); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java index 70f9735407938..58de14ba9171f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java @@ -28,7 +28,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSConfigKeys; @@ -50,11 +49,11 @@ public static WebHdfsFileSystem getWebHdfsFileSystem( URISyntaxException { final String uri; - if (WebHdfsFileSystem.SCHEME.equals(scheme)) { - uri = WebHdfsFileSystem.SCHEME + "://" + if (WebHdfsConstants.WEBHDFS_SCHEME.equals(scheme)) { + uri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY); - } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) { - uri = SWebHdfsFileSystem.SCHEME + "://" + } else if (WebHdfsConstants.SWEBHDFS_SCHEME.equals(scheme)) { + uri = WebHdfsConstants.SWEBHDFS_SCHEME + "://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY); } else { throw new IllegalArgumentException("unknown scheme:" + scheme); @@ -65,7 +64,7 @@ public static WebHdfsFileSystem getWebHdfsFileSystem( public static WebHdfsFileSystem getWebHdfsFileSystemAs( final UserGroupInformation ugi, final Configuration conf ) throws IOException, InterruptedException { - return getWebHdfsFileSystemAs(ugi, conf, WebHdfsFileSystem.SCHEME); + return getWebHdfsFileSystemAs(ugi, conf, WebHdfsConstants.WEBHDFS_SCHEME); } public static WebHdfsFileSystem getWebHdfsFileSystemAs( @@ -74,7 +73,7 @@ public static WebHdfsFileSystem getWebHdfsFileSystemAs( return ugi.doAs(new PrivilegedExceptionAction() { @Override public WebHdfsFileSystem run() throws Exception { - return getWebHdfsFileSystem(conf, WebHdfsFileSystem.SCHEME); + return getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME); } }); }
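
Most of the test hunks above follow one caller-side pattern: build a webhdfs:// URI from the configured NameNode HTTP address and obtain a FileSystem from it, now spelling the scheme as WebHdfsConstants.WEBHDFS_SCHEME instead of WebHdfsFileSystem.SCHEME. A minimal sketch of that pattern, modeled on the TestQuota and TestDistributedFileSystem hunks; the class and method names here are illustrative, not part of the patch.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;

public class WebHdfsUriExample {
  // Builds a webhdfs:// URI from the NameNode HTTP address and returns the
  // corresponding FileSystem, as the patched tests do.
  public static FileSystem openWebHdfs(Configuration conf) throws IOException {
    final String nnAddr = conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
    final String webhdfsUri = WebHdfsConstants.WEBHDFS_SCHEME + "://" + nnAddr;
    return new Path(webhdfsUri).getFileSystem(conf);
  }
}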
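
TokenAspect#getSchemeByKind, patched above, now maps a delegation token kind back to its URI scheme through the shared constants rather than the per-FileSystem SCHEME fields. Restated as a standalone sketch (the wrapper class name is illustrative):

import org.apache.hadoop.hdfs.web.WebHdfsConstants;
import org.apache.hadoop.io.Text;

final class SchemeByKindSketch {
  // Token kind -> URI scheme, mirroring the patched TokenAspect logic.
  static String schemeByKind(Text kind) {
    if (kind.equals(WebHdfsConstants.WEBHDFS_TOKEN_KIND)) {
      return WebHdfsConstants.WEBHDFS_SCHEME;   // "webhdfs"
    } else if (kind.equals(WebHdfsConstants.SWEBHDFS_TOKEN_KIND)) {
      return WebHdfsConstants.SWEBHDFS_SCHEME;  // "swebhdfs"
    } else {
      throw new IllegalArgumentException("Unsupported scheme");
    }
  }
}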
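
DelegationTokenFetcher keeps its backward-compatible mapping of plain http/https URLs onto the webhdfs/swebhdfs schemes, only sourcing the scheme strings from WebHdfsConstants. A standalone sketch of that rewrite; the host name in the comment is made up for illustration:

import java.net.URI;

import org.apache.hadoop.hdfs.web.WebHdfsConstants;

final class UrlSchemeRewriteSketch {
  // e.g. "https://nn.example.com:50470/" -> "swebhdfs://nn.example.com:50470/"
  static URI toWebHdfsUri(String url) {
    return URI.create(
        url.replaceFirst("^http://", WebHdfsConstants.WEBHDFS_SCHEME + "://")
           .replaceFirst("^https://", WebHdfsConstants.SWEBHDFS_SCHEME + "://"));
  }
}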
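
WebHdfsFileSystem#resolveNNAddr and TestDFSUtil#testGetHaNnHttpAddresses, both touched above, resolve per-NameNode HTTP addresses for a logical HA URI through DFSUtilClient.getHaNnWebHdfsAddresses. A sketch of that lookup under an assumed two-NameNode nameservice "ns1"; the nameservice id, namenode ids, host names, and ports are illustrative only:

import java.net.InetSocketAddress;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.web.WebHdfsConstants;

final class HaWebHdfsAddressesSketch {
  static Map<String, Map<String, InetSocketAddress>> resolve() {
    Configuration conf = new Configuration();
    // Assumed HA layout: nameservice "ns1" with namenodes "nn1" and "nn2".
    conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
    conf.set(DFSUtil.addKeySuffixes(
        HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX, "ns1"), "nn1,nn2");
    conf.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1"),
        "machine1.example.com:50070");
    conf.set(DFSUtil.addKeySuffixes(
        DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn2"),
        "machine2.example.com:50070");
    // Returns nameserviceId -> (namenodeId -> InetSocketAddress), the map the
    // patched resolveNNAddr() consults for the logical URI.
    return DFSUtilClient.getHaNnWebHdfsAddresses(conf, WebHdfsConstants.WEBHDFS_SCHEME);
  }
}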