diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java index 668fdaa169c..0771f4ce137 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java @@ -18,10 +18,23 @@ package org.apache.hadoop.hdds.scm; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; +import java.io.InterruptedIOException; +import java.security.cert.X509Certificate; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Semaphore; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; @@ -41,6 +54,8 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.util.Time; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import io.opentracing.Scope; import io.opentracing.util.GlobalTracer; import org.apache.ratis.thirdparty.io.grpc.ManagedChannel; @@ -52,28 +67,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.io.InterruptedIOException; -import java.security.cert.X509Certificate; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Semaphore; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - /** * A Client for the storageContainer protocol for read object data. */ public class XceiverClientGrpc extends XceiverClientSpi { static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class); private final Pipeline pipeline; - private final Configuration config; + private final ConfigurationSource config; private Map asyncStubs; private XceiverClientMetrics metrics; private Map channels; @@ -94,7 +94,7 @@ public class XceiverClientGrpc extends XceiverClientSpi { * @param config -- Ozone Config * @param caCert - SCM ca certificate. */ - public XceiverClientGrpc(Pipeline pipeline, Configuration config, + public XceiverClientGrpc(Pipeline pipeline, ConfigurationSource config, X509Certificate caCert) { super(); Preconditions.checkNotNull(pipeline); @@ -121,7 +121,7 @@ public XceiverClientGrpc(Pipeline pipeline, Configuration config, * @param pipeline - Pipeline that defines the machines. 
* @param config -- Ozone Config */ - public XceiverClientGrpc(Pipeline pipeline, Configuration config) { + public XceiverClientGrpc(Pipeline pipeline, ConfigurationSource config) { this(pipeline, config, null); } diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java index 4ceff0b2051..0cfaca74679 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java @@ -24,10 +24,11 @@ import com.google.common.cache.CacheBuilder; import com.google.common.cache.RemovalListener; import com.google.common.cache.RemovalNotification; -import org.apache.hadoop.conf.Configuration; + import org.apache.hadoop.hdds.conf.Config; import org.apache.hadoop.hdds.conf.ConfigGroup; import org.apache.hadoop.hdds.conf.ConfigType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -69,7 +70,7 @@ public class XceiverClientManager implements Closeable { private static final Logger LOG = LoggerFactory.getLogger(XceiverClientManager.class); //TODO : change this to SCM configuration class - private final Configuration conf; + private final ConfigurationSource conf; private final Cache clientCache; private X509Certificate caCert; @@ -83,12 +84,13 @@ public class XceiverClientManager implements Closeable { * * @param conf configuration */ - public XceiverClientManager(Configuration conf) throws IOException { + public XceiverClientManager(ConfigurationSource conf) throws IOException { this(conf, OzoneConfiguration.of(conf).getObject(ScmClientConfig.class), null); } - public XceiverClientManager(Configuration conf, ScmClientConfig clientConf, + public XceiverClientManager(ConfigurationSource conf, + ScmClientConfig clientConf, String caCertPem) throws IOException { Preconditions.checkNotNull(clientConf); Preconditions.checkNotNull(conf); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index 0d12355e874..ae5fe67747b 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -34,20 +34,25 @@ import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.tracing.TracingUtil; import 
org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.ratis.RatisHelper; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import io.opentracing.Scope; +import io.opentracing.util.GlobalTracer; import org.apache.ratis.client.RaftClient; import org.apache.ratis.grpc.GrpcTlsConfig; import org.apache.ratis.proto.RaftProtos; @@ -61,12 +66,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; - -import io.opentracing.Scope; -import io.opentracing.util.GlobalTracer; - /** * An abstract implementation of {@link XceiverClientSpi} using Ratis. * The underlying RPC mechanism can be chosen via the constructor. @@ -77,13 +76,13 @@ public final class XceiverClientRatis extends XceiverClientSpi { public static XceiverClientRatis newXceiverClientRatis( org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, - Configuration ozoneConf) { + ConfigurationSource ozoneConf) { return newXceiverClientRatis(pipeline, ozoneConf, null); } public static XceiverClientRatis newXceiverClientRatis( org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline, - Configuration ozoneConf, X509Certificate caCert) { + ConfigurationSource ozoneConf, X509Certificate caCert) { final String rpcType = ozoneConf .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); @@ -100,7 +99,7 @@ public static XceiverClientRatis newXceiverClientRatis( private final AtomicReference client = new AtomicReference<>(); private final RetryPolicy retryPolicy; private final GrpcTlsConfig tlsConfig; - private final Configuration ozoneConfiguration; + private final ConfigurationSource ozoneConfiguration; // Map to track commit index at every server private final ConcurrentHashMap commitInfoMap; @@ -112,7 +111,7 @@ public static XceiverClientRatis newXceiverClientRatis( */ private XceiverClientRatis(Pipeline pipeline, RpcType rpcType, RetryPolicy retryPolicy, GrpcTlsConfig tlsConfig, - Configuration configuration) { + ConfigurationSource configuration) { super(); this.pipeline = pipeline; this.rpcType = rpcType; diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java index 67899691694..8a6518d103c 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java @@ -29,9 +29,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; @@ -213,17 +213,16 @@ public static void checkNotNull(T... 
references) { * @param conf Configuration object * @return list cache size */ - public static int getListCacheSize(Configuration conf) { + public static int getListCacheSize(ConfigurationSource conf) { return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE, OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT); } - /** * Returns the maximum no of outstanding async requests to be handled by * Standalone and Ratis client. */ - public static int getMaxOutstandingRequests(Configuration config) { + public static int getMaxOutstandingRequests(ConfigurationSource config) { return OzoneConfiguration.of(config) .getObject(RatisClientConfig.class) .getMaxOutstandingRequests(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java index acbd4563d7c..d1c2bc3bcd1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java @@ -33,11 +33,11 @@ import java.util.OptionalInt; import java.util.TimeZone; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -89,7 +89,8 @@ private HddsUtils() { * * @return Target {@code InetSocketAddress} for the SCM client endpoint. */ - public static InetSocketAddress getScmAddressForClients(Configuration conf) { + public static InetSocketAddress getScmAddressForClients( + ConfigurationSource conf) { Optional host = getHostNameFromConfigKeys(conf, ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); @@ -116,7 +117,7 @@ public static InetSocketAddress getScmAddressForClients(Configuration conf) { * @throws IllegalArgumentException if configuration is not defined. */ public static InetSocketAddress getScmAddressForBlockClients( - Configuration conf) { + ConfigurationSource conf) { Optional host = getHostNameFromConfigKeys(conf, ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); @@ -147,7 +148,8 @@ public static InetSocketAddress getScmAddressForBlockClients( * @throws IllegalArgumentException if any values are not in the 'host' * or host:port format. */ - public static Optional getHostNameFromConfigKeys(Configuration conf, + public static Optional getHostNameFromConfigKeys( + ConfigurationSource conf, String... keys) { for (final String key : keys) { final String value = conf.getTrimmed(key); @@ -206,7 +208,7 @@ public static OptionalInt getHostPort(String value) { * or host:port format. */ public static OptionalInt getPortNumberFromConfigKeys( - Configuration conf, String... keys) { + ConfigurationSource conf, String... 
keys) { for (final String key : keys) { final String value = conf.getTrimmed(key); final OptionalInt hostPort = getHostPort(value); @@ -224,7 +226,7 @@ public static OptionalInt getPortNumberFromConfigKeys( * @throws IllegalArgumentException If the configuration is invalid */ public static Collection getSCMAddresses( - Configuration conf) { + ConfigurationSource conf) { Collection names = conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES); if (names.isEmpty()) { @@ -255,7 +257,7 @@ public static Collection getSCMAddresses( * @throws IllegalArgumentException If the configuration is invalid */ public static InetSocketAddress getReconAddresses( - Configuration conf) { + ConfigurationSource conf) { String name = conf.get(OZONE_RECON_ADDRESS_KEY); if (StringUtils.isEmpty(name)) { return null; @@ -277,7 +279,8 @@ public static InetSocketAddress getReconAddresses( * @throws IllegalArgumentException if {@code conf} has more than one SCM * address or it has none */ - public static InetSocketAddress getSingleSCMAddress(Configuration conf) { + public static InetSocketAddress getSingleSCMAddress( + ConfigurationSource conf) { Collection singleton = getSCMAddresses(conf); Preconditions.checkArgument(singleton.size() == 1, MULTIPLE_SCM_NOT_YET_SUPPORTED); @@ -295,7 +298,7 @@ public static InetSocketAddress getSingleSCMAddress(Configuration conf) { * @throws UnknownHostException if the dfs.datanode.dns.interface * option is used and the hostname can not be determined */ - public static String getHostName(Configuration conf) + public static String getHostName(ConfigurationSource conf) throws UnknownHostException { String name = conf.get(DFS_DATANODE_HOST_NAME_KEY); if (name == null) { @@ -498,7 +501,7 @@ public static void validatePath(Path path, Path ancestor) { * @param alias name of the credential to retreive * @return String credential value or null */ - static String getPassword(Configuration conf, String alias) { + static String getPassword(ConfigurationSource conf, String alias) { String password = null; try { char[] passchars = conf.getPassword(alias); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java index fda3c860e7e..c15f06a2de7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.conf; -import javax.annotation.PostConstruct; import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; import javax.xml.bind.Unmarshaller; @@ -27,30 +26,44 @@ import javax.xml.bind.annotation.XmlElement; import javax.xml.bind.annotation.XmlRootElement; import java.io.IOException; -import java.lang.reflect.Field; -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; import java.net.URL; import java.util.ArrayList; +import java.util.Collection; import java.util.Enumeration; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.stream.Collectors; -import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; + +import com.google.common.base.Preconditions; /** * Configuration for ozone. */ @InterfaceAudience.Private -public class OzoneConfiguration extends Configuration { +public class OzoneConfiguration extends Configuration + implements ConfigurationSource { static { activate(); } + public static OzoneConfiguration of(ConfigurationSource source) { + if (source instanceof LegacyHadoopConfigurationSource) { + return new OzoneConfiguration(((LegacyHadoopConfigurationSource) source) + .getOriginalHadoopConfiguration()); + } + return (OzoneConfiguration) source; + } + + public static OzoneConfiguration of(OzoneConfiguration source) { + return source; + } + public static OzoneConfiguration of(Configuration conf) { Preconditions.checkNotNull(conf); @@ -98,157 +111,6 @@ public List readPropertyFromXml(URL url) throws JAXBException { return config.getProperties(); } - /** - * Create a Configuration object and inject the required configuration values. - * - * @param configurationClass The class where the fields are annotated with - * the configuration. - * @return Initiated java object where the config fields are injected. 
- */ - public T getObject(Class configurationClass) { - - T configObject; - - try { - configObject = configurationClass.newInstance(); - } catch (InstantiationException | IllegalAccessException e) { - throw new ConfigurationException( - "Configuration class can't be created: " + configurationClass, e); - } - ConfigGroup configGroup = - configurationClass.getAnnotation(ConfigGroup.class); - - String prefix = configGroup.prefix(); - - injectConfiguration(configurationClass, configObject, prefix); - - callPostConstruct(configurationClass, configObject); - - return configObject; - - } - - private void injectConfiguration(Class configurationClass, - T configObject, String prefix) { - injectConfigurationToObject(configurationClass, configObject, prefix); - Class superClass = configurationClass.getSuperclass(); - while (superClass != null) { - injectConfigurationToObject(superClass, configObject, prefix); - superClass = superClass.getSuperclass(); - } - } - - private void callPostConstruct(Class configurationClass, - T configObject) { - for (Method method : configurationClass.getMethods()) { - if (method.isAnnotationPresent(PostConstruct.class)) { - try { - method.invoke(configObject); - } catch (IllegalAccessException ex) { - throw new IllegalArgumentException( - "@PostConstruct method in " + configurationClass - + " is not accessible"); - } catch (InvocationTargetException e) { - if (e.getCause() instanceof RuntimeException) { - throw (RuntimeException) e.getCause(); - } else { - throw new IllegalArgumentException( - "@PostConstruct can't be executed on " + configurationClass - + " after configObject " - + "injection", e); - } - } - } - } - } - - private void injectConfigurationToObject(Class configurationClass, - T configuration, String prefix) { - for (Field field : configurationClass.getDeclaredFields()) { - if (field.isAnnotationPresent(Config.class)) { - - String fieldLocation = - configurationClass + "." + field.getName(); - - Config configAnnotation = field.getAnnotation(Config.class); - - String key = prefix + "." + configAnnotation.key(); - - ConfigType type = configAnnotation.type(); - - if (type == ConfigType.AUTO) { - type = detectConfigType(field.getType(), fieldLocation); - } - - //Note: default value is handled by ozone-default.xml. Here we can - //use any default. - try { - switch (type) { - case STRING: - forcedFieldSet(field, configuration, get(key)); - break; - case INT: - forcedFieldSet(field, configuration, getInt(key, 0)); - break; - case BOOLEAN: - forcedFieldSet(field, configuration, getBoolean(key, false)); - break; - case LONG: - forcedFieldSet(field, configuration, getLong(key, 0)); - break; - case TIME: - forcedFieldSet(field, configuration, - getTimeDuration(key, 0, configAnnotation.timeUnit())); - break; - default: - throw new ConfigurationException( - "Unsupported ConfigType " + type + " on " + fieldLocation); - } - } catch (IllegalAccessException e) { - throw new ConfigurationException( - "Can't inject configuration to " + fieldLocation, e); - } - - } - } - } - - /** - * Set the value of one field even if it's private. 
- */ - private void forcedFieldSet(Field field, T object, Object value) - throws IllegalAccessException { - boolean accessChanged = false; - if (!field.isAccessible()) { - field.setAccessible(true); - accessChanged = true; - } - field.set(object, value); - if (accessChanged) { - field.setAccessible(false); - } - } - - private ConfigType detectConfigType(Class parameterType, - String methodLocation) { - ConfigType type; - if (parameterType == String.class) { - type = ConfigType.STRING; - } else if (parameterType == Integer.class || parameterType == int.class) { - type = ConfigType.INT; - } else if (parameterType == Long.class || parameterType == long.class) { - type = ConfigType.LONG; - } else if (parameterType == Boolean.class - || parameterType == boolean.class) { - type = ConfigType.BOOLEAN; - } else { - throw new ConfigurationException( - "Unsupported configuration type " + parameterType + " in " - + methodLocation); - } - return type; - } - /** * Class to marshall/un-marshall configuration from xml files. */ @@ -379,6 +241,14 @@ public Properties getAllPropertiesByTag(String tag) { return props; } + @Override + public Collection getConfigKeys() { + return getProps().keySet() + .stream() + .map(Object::toString) + .collect(Collectors.toList()); + } + @Override public Map getPropsWithPrefix(String confPrefix) { Properties props = getProps(); @@ -392,4 +262,5 @@ public Map getPropsWithPrefix(String confPrefix) { } return configMap; } + } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java index 118da1f58ff..5b57b34c322 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java @@ -17,16 +17,16 @@ */ package org.apache.hadoop.hdds.fs; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.time.Duration; + import org.apache.hadoop.hdds.conf.Config; import org.apache.hadoop.hdds.conf.ConfigGroup; import org.apache.hadoop.hdds.conf.ConfigTag; import org.apache.hadoop.hdds.conf.ConfigType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import java.io.File; -import java.time.Duration; - /** * Uses DU for all volumes. Saves used value in cache file. 
*/ @@ -40,7 +40,8 @@ public class DUFactory implements SpaceUsageCheckFactory { private Conf conf; @Override - public SpaceUsageCheckFactory setConfiguration(Configuration configuration) { + public SpaceUsageCheckFactory setConfiguration( + ConfigurationSource configuration) { conf = OzoneConfiguration.of(configuration).getObject(Conf.class); return this; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java index 37953d915ce..3ed74c99249 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java @@ -17,16 +17,16 @@ */ package org.apache.hadoop.hdds.fs; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.time.Duration; + import org.apache.hadoop.hdds.conf.Config; import org.apache.hadoop.hdds.conf.ConfigGroup; import org.apache.hadoop.hdds.conf.ConfigTag; import org.apache.hadoop.hdds.conf.ConfigType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import java.io.File; -import java.time.Duration; - /** * Uses DedicatedDiskSpaceUsage for all volumes. Does not save results since * the information is relatively cheap to obtain. @@ -38,7 +38,8 @@ public class DedicatedDiskSpaceUsageFactory implements SpaceUsageCheckFactory { private Conf conf; @Override - public SpaceUsageCheckFactory setConfiguration(Configuration configuration) { + public SpaceUsageCheckFactory setConfiguration( + ConfigurationSource configuration) { conf = OzoneConfiguration.of(configuration).getObject(Conf.class); return this; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java index 3a3960b5e6b..0205de58a44 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java @@ -17,21 +17,22 @@ */ package org.apache.hadoop.hdds.fs; +import java.io.File; +import java.io.UncheckedIOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; + import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.Config; import org.apache.hadoop.hdds.conf.ConfigGroup; import org.apache.hadoop.hdds.conf.ConfigTag; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.UncheckedIOException; -import java.lang.reflect.Constructor; -import java.lang.reflect.InvocationTargetException; - /** * Configures disk space checks (du, df, etc.) for HDDS volumes, allowing * different implementations and parameters for different volumes. @@ -57,7 +58,7 @@ public interface SpaceUsageCheckFactory { * Updates the factory with global configuration. 
* @return factory configured with {@code conf} */ - default SpaceUsageCheckFactory setConfiguration(Configuration conf) { + default SpaceUsageCheckFactory setConfiguration(ConfigurationSource conf) { // override if configurable return this; } @@ -68,14 +69,16 @@ default SpaceUsageCheckFactory setConfiguration(Configuration conf) { * Defaults to {@link DUFactory} if no class is configured or it cannot be * instantiated. */ - static SpaceUsageCheckFactory create(Configuration config) { + static SpaceUsageCheckFactory create(ConfigurationSource config) { Conf conf = OzoneConfiguration.of(config).getObject(Conf.class); Class aClass = null; String className = conf.getClassName(); if (className != null && !className.isEmpty()) { try { - aClass = config.getClassByName(className) - .asSubclass(SpaceUsageCheckFactory.class); + aClass = + SpaceUsageCheckFactory.class + .getClassLoader().loadClass(className) + .asSubclass(SpaceUsageCheckFactory.class); } catch (ClassNotFoundException | RuntimeException e) { Logger log = LoggerFactory.getLogger(SpaceUsageCheckFactory.class); log.warn("Error trying to create SpaceUsageCheckFactory: '{}'", @@ -91,7 +94,8 @@ static SpaceUsageCheckFactory create(Configuration config) { aClass.getConstructor(); instance = constructor.newInstance(); } catch (IllegalAccessException | InstantiationException | - InvocationTargetException | NoSuchMethodException e) { + InvocationTargetException | NoSuchMethodException | + ClassCastException e) { Logger log = LoggerFactory.getLogger(SpaceUsageCheckFactory.class); log.warn("Error trying to create {}", aClass, e); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index 8fab63365ac..08ead55cca6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -28,7 +28,7 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Collectors; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -146,27 +146,27 @@ static RaftGroup newRaftGroup(Pipeline pipeline) { static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline, RetryPolicy retryPolicy, GrpcTlsConfig tlsConfig, - Configuration ozoneConfiguration) throws IOException { + ConfigurationSource ozoneConfiguration) throws IOException { return newRaftClient(rpcType, toRaftPeerId(pipeline.getLeaderNode()), newRaftGroup(RaftGroupId.valueOf(pipeline.getId().getId()), pipeline.getNodes()), retryPolicy, tlsConfig, ozoneConfiguration); } - static RpcType getRpcType(Configuration conf) { + static RpcType getRpcType(ConfigurationSource conf) { return SupportedRpcType.valueOfIgnoreCase(conf.get( ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT)); } - static RaftClient newRaftClient(RaftPeer leader, Configuration conf) { + static RaftClient newRaftClient(RaftPeer leader, ConfigurationSource conf) { return newRaftClient(getRpcType(conf), leader, RatisHelper.createRetryPolicy(conf), conf); } static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader, RetryPolicy retryPolicy, GrpcTlsConfig tlsConfig, - Configuration configuration) { + ConfigurationSource configuration) { return 
newRaftClient(rpcType, leader.getId(), newRaftGroup(Collections.singletonList(leader)), retryPolicy, tlsConfig, configuration); @@ -174,7 +174,7 @@ static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader, static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader, RetryPolicy retryPolicy, - Configuration ozoneConfiguration) { + ConfigurationSource ozoneConfiguration) { return newRaftClient(rpcType, leader.getId(), newRaftGroup(Collections.singletonList(leader)), retryPolicy, null, ozoneConfiguration); @@ -183,7 +183,7 @@ static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader, @SuppressWarnings("checkstyle:ParameterNumber") static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader, RaftGroup group, RetryPolicy retryPolicy, - GrpcTlsConfig tlsConfig, Configuration ozoneConfiguration) { + GrpcTlsConfig tlsConfig, ConfigurationSource ozoneConfiguration) { if (LOG.isTraceEnabled()) { LOG.trace("newRaftClient: {}, leader={}, group={}", rpcType, leader, group); @@ -215,7 +215,7 @@ static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader, * @param ozoneConf * @param raftProperties */ - static void createRaftClientProperties(Configuration ozoneConf, + static void createRaftClientProperties(ConfigurationSource ozoneConf, RaftProperties raftProperties) { // As for client we do not require server and grpc server/tls. exclude them. @@ -238,7 +238,7 @@ static void createRaftClientProperties(Configuration ozoneConf, * @param ozoneConf * @param raftProperties */ - static void createRaftServerProperties(Configuration ozoneConf, + static void createRaftServerProperties(ConfigurationSource ozoneConf, RaftProperties raftProperties) { Map ratisServerConf = @@ -253,7 +253,7 @@ static void createRaftServerProperties(Configuration ozoneConf, static Map getDatanodeRatisPrefixProps( - Configuration configuration) { + ConfigurationSource configuration) { return configuration.getPropsWithPrefix(HDDS_DATANODE_RATIS_PREFIX_KEY); } @@ -269,11 +269,7 @@ static GrpcTlsConfig createTlsClientConfig(SecurityConfig conf, return tlsConfig; } - - - - - static RetryPolicy createRetryPolicy(Configuration conf) { + static RetryPolicy createRetryPolicy(ConfigurationSource conf) { int maxRetryCount = conf.getInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY, OzoneConfigKeys. diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java index c51d66ae122..dc44392db26 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.scm; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; @@ -44,7 +44,7 @@ private ByteStringConversion(){} // no instantiation. 
* @see ByteBuffer */ public static Function createByteBufferConversion( - Configuration config){ + ConfigurationSource config){ boolean unsafeEnabled = config!=null && config.getBoolean( OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED, diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java index 58394f14990..1db46c4ab30 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java @@ -34,7 +34,7 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; import static org.apache.hadoop.hdds.scm.net.NetConstants.SCOPE_REVERSE_STR; import static org.apache.hadoop.hdds.scm.net.NetConstants.ANCESTOR_GENERATION_DEFAULT; @@ -60,7 +60,7 @@ public class NetworkTopologyImpl implements NetworkTopology{ /** Lock to coordinate cluster tree access. */ private ReadWriteLock netlock = new ReentrantReadWriteLock(true); - public NetworkTopologyImpl(Configuration conf) { + public NetworkTopologyImpl(ConfigurationSource conf) { schemaManager = NodeSchemaManager.getInstance(); schemaManager.init(conf); maxLevel = schemaManager.getMaxLevel(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java index c60c2c80aa9..698b9da4b72 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java @@ -24,7 +24,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import java.util.ArrayList; import java.util.Arrays; @@ -55,7 +55,7 @@ public static NodeSchemaManager getInstance() { return instance; } - public void init(Configuration conf) { + public void init(ConfigurationSource conf) { /** * Load schemas from network topology schema configuration file */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java index 9d077f6a84f..394a0c35132 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java @@ -19,14 +19,6 @@ package org.apache.hadoop.hdds.security.x509; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslProvider; -import org.bouncycastle.jce.provider.BouncyCastleProvider; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.nio.file.Path; import java.nio.file.Paths; import java.security.Provider; @@ -34,6 +26,10 @@ import java.time.Duration; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.ozone.OzoneConfigKeys; + +import com.google.common.base.Preconditions; 
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_ALGORITHM; @@ -71,6 +67,10 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; +import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslProvider; +import org.bouncycastle.jce.provider.BouncyCastleProvider; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A class that deals with all Security related configs in HDDS. @@ -82,7 +82,7 @@ public class SecurityConfig { private static final Logger LOG = LoggerFactory.getLogger(SecurityConfig.class); private static volatile Provider provider; - private final Configuration configuration; + private final ConfigurationSource configuration; private final int size; private final String keyAlgo; private final String providerString; @@ -106,7 +106,7 @@ public class SecurityConfig { * * @param configuration - HDDS Configuration */ - public SecurityConfig(Configuration configuration) { + public SecurityConfig(ConfigurationSource configuration) { Preconditions.checkNotNull(configuration, "Configuration cannot be null"); this.configuration = configuration; this.size = this.configuration.getInt(HDDS_KEY_LEN, HDDS_DEFAULT_KEY_LEN); @@ -305,7 +305,7 @@ public String getSignatureAlgo() { * * @return Configuration */ - public Configuration getConfiguration() { + public ConfigurationSource getConfiguration() { return configuration; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java index fcfa613fad6..a9a2ab8e19e 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java @@ -27,6 +27,7 @@ import io.opentracing.Tracer; import io.opentracing.util.GlobalTracer; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.ScmConfigKeys; /** @@ -43,7 +44,7 @@ private TracingUtil() { * Initialize the tracing with the given service name. */ public static void initTracing( - String serviceName, org.apache.hadoop.conf.Configuration conf) { + String serviceName, ConfigurationSource conf) { if (!GlobalTracer.isRegistered() && isTracingEnabled(conf)) { Configuration config = Configuration.fromEnv(serviceName); JaegerTracer tracer = config.getTracerBuilder() @@ -116,7 +117,7 @@ private static SpanContext extractParent(String parent, Tracer tracer) { * calls to the delegate and also enables tracing. 
*/ public static T createProxy( - T delegate, Class itf, org.apache.hadoop.conf.Configuration conf) { + T delegate, Class itf, ConfigurationSource conf) { if (!isTracingEnabled(conf)) { return delegate; } @@ -127,7 +128,7 @@ public static T createProxy( } private static boolean isTracingEnabled( - org.apache.hadoop.conf.Configuration conf) { + ConfigurationSource conf) { return conf.getBoolean( ScmConfigKeys.HDDS_TRACING_ENABLED, ScmConfigKeys.HDDS_TRACING_ENABLED_DEFAULT); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LegacyHadoopConfigurationSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LegacyHadoopConfigurationSource.java new file mode 100644 index 00000000000..badc916da2b --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LegacyHadoopConfigurationSource.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.utils; + +import java.util.Collection; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; + +/** + * Configuration source to wrap Hadoop Configuration object. + */ +public class LegacyHadoopConfigurationSource implements ConfigurationSource { + + private Configuration configuration; + + public LegacyHadoopConfigurationSource( + Configuration configuration) { + this.configuration = configuration; + } + + @Override + public String get(String key) { + return configuration.getRaw(key); + } + + @Override + public Collection getConfigKeys() { + return configuration.getPropsWithPrefix("").keySet(); + } + + @Override + public void set(String key, String value) { + configuration.set(key, value); + } + + /** + * Helper method to get original Hadoop configuration for legacy Hadoop + * libraries. + *
+ * It can work on server side but not on client side where we might have + * different configuration. + */ + public static Configuration asHadoopConfiguration( + ConfigurationSource config) { + if (config instanceof Configuration) { + return (Configuration) config; + } else if (config instanceof LegacyHadoopConfigurationSource) { + return ((LegacyHadoopConfigurationSource) config).configuration; + } else { + throw new IllegalArgumentException( + "Core Hadoop code requires real Hadoop configuration"); + } + } + + public Configuration getOriginalHadoopConfiguration() { + return configuration; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java index 0bdddaf1822..726862cea73 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java @@ -18,18 +18,6 @@ package org.apache.hadoop.ozone; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HTTP_SECURITY_ENABLED_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HTTP_SECURITY_ENABLED_KEY; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; - -import org.apache.commons.validator.routines.InetAddressValidator; -import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.File; import java.io.IOException; import java.net.InetAddress; @@ -42,6 +30,18 @@ import java.util.List; import java.util.Set; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.hdds.conf.ConfigurationSource; + +import org.apache.commons.validator.routines.InetAddressValidator; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HTTP_SECURITY_ENABLED_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HTTP_SECURITY_ENABLED_KEY; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * Ozone security Util class. 
*/ @@ -58,12 +58,12 @@ public final class OzoneSecurityUtil { private OzoneSecurityUtil() { } - public static boolean isSecurityEnabled(Configuration conf) { + public static boolean isSecurityEnabled(ConfigurationSource conf) { return conf.getBoolean(OZONE_SECURITY_ENABLED_KEY, OZONE_SECURITY_ENABLED_DEFAULT); } - public static boolean isHttpSecurityEnabled(Configuration conf) { + public static boolean isHttpSecurityEnabled(ConfigurationSource conf) { return isSecurityEnabled(conf) && conf.getBoolean(OZONE_HTTP_SECURITY_ENABLED_KEY, OZONE_HTTP_SECURITY_ENABLED_DEFAULT); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java index 84aca6decb8..ac6e9a13ab1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.lock; import org.apache.commons.pool2.impl.GenericObjectPool; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -44,7 +44,7 @@ public class LockManager { * * @param conf Configuration object */ - public LockManager(final Configuration conf) { + public LockManager(final ConfigurationSource conf) { this(conf, false); } @@ -55,7 +55,7 @@ public LockManager(final Configuration conf) { * @param conf Configuration object * @param fair - true to use fair lock ordering, else non-fair lock ordering. */ - public LockManager(final Configuration conf, boolean fair) { + public LockManager(final ConfigurationSource conf, boolean fair) { lockPool = new GenericObjectPool<>(new PooledLockFactory(fair)); lockPool.setMaxTotal(-1); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java index ddb1f2b4479..7530bd0a880 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java @@ -25,18 +25,17 @@ import java.util.Map; import java.util.Optional; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.test.LambdaTestUtils; -import org.junit.Assert; -import org.junit.Test; import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses; import static org.hamcrest.core.Is.is; +import org.junit.Assert; import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import org.junit.Test; /** * Testing HddsUtils. 
@@ -76,7 +75,7 @@ public void validatePath() throws Exception { @Test public void testGetSCMAddresses() { - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); Collection addresses; InetSocketAddress addr; Iterator it; diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java index ee724e2ef70..5ab16ab265b 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java @@ -17,26 +17,26 @@ */ package org.apache.hadoop.hdds.conf; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.junit.Rule; -import org.junit.Before; -import org.junit.Test; -import org.junit.Assert; -import org.junit.rules.TemporaryFolder; - import java.io.BufferedWriter; import java.io.File; import java.io.FileWriter; import java.io.IOException; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.fs.Path; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + /** * Test class for OzoneConfiguration. */ public class TestOzoneConfiguration { - private Configuration conf; + private OzoneConfiguration conf; @Rule public TemporaryFolder tempConfigs = new TemporaryFolder(); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java index e6511745651..7a8701ec9d0 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hdds.fs; -import org.apache.hadoop.conf.Configuration; -import org.junit.Test; - import java.io.File; import java.time.Duration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; + import static org.apache.hadoop.hdds.fs.DUFactory.Conf.configKeyForRefreshPeriod; import static org.apache.hadoop.test.GenericTestUtils.getTestDir; +import org.junit.Test; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; @@ -40,7 +40,7 @@ public void testCreateViaConfig() { @Test public void testParams() { - Configuration conf = new Configuration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.set(configKeyForRefreshPeriod(), "1h"); File dir = getTestDir(getClass().getSimpleName()); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java index e3015b57a52..d0dfe6026f7 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java @@ -17,14 +17,14 @@ */ package org.apache.hadoop.hdds.fs; -import org.apache.hadoop.conf.Configuration; -import org.junit.Test; - import java.io.File; import java.time.Duration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; + import static org.apache.hadoop.hdds.fs.DedicatedDiskSpaceUsageFactory.Conf.configKeyForRefreshPeriod; import static 
org.apache.hadoop.test.GenericTestUtils.getTestDir; +import org.junit.Test; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; @@ -41,7 +41,7 @@ public void testCreateViaConfig() { @Test public void testParams() { - Configuration conf = new Configuration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.set(configKeyForRefreshPeriod(), "2m"); File dir = getTestDir(getClass().getSimpleName()); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java index 09b8cc2ecec..4f53b262457 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java @@ -17,19 +17,20 @@ */ package org.apache.hadoop.hdds.fs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.junit.Before; -import org.junit.Test; -import org.slf4j.LoggerFactory; - import java.io.File; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.test.GenericTestUtils.LogCapturer; + import static org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory.Conf.configKeyForClassName; +import org.junit.Before; +import org.junit.Test; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertTrue; +import org.slf4j.LoggerFactory; /** * Tests for {@link SpaceUsageCheckFactory}. @@ -39,7 +40,8 @@ public class TestSpaceUsageFactory { private LogCapturer capturer; /** - * Verifies that {@link SpaceUsageCheckFactory#create(Configuration)} creates + * Verifies that {@link SpaceUsageCheckFactory#create(ConfigurationSource)} + * creates * the correct implementation if configured. This should be called from each * specific implementation's test class. 
* @return the instance created, so that further checks can done, if needed @@ -47,7 +49,7 @@ public class TestSpaceUsageFactory { protected static T testCreateViaConfig( Class factoryClass) { - Configuration conf = configFor(factoryClass); + OzoneConfiguration conf = configFor(factoryClass); SpaceUsageCheckFactory factory = SpaceUsageCheckFactory.create(conf); @@ -104,10 +106,10 @@ private void assertLogged(String substring) { "in log output, but only got: " + output); } - private static Configuration configFor( - Class factoryClass) { + private static OzoneConfiguration + configFor(Class factoryClass) { - Configuration conf = new Configuration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.setClass(configKeyForClassName(), factoryClass, SpaceUsageCheckFactory.class); @@ -116,12 +118,12 @@ private static Configuration configFor( private static void testDefaultFactoryForBrokenImplementation( Class brokenImplementationClass) { - Configuration conf = configFor(brokenImplementationClass); + OzoneConfiguration conf = configFor(brokenImplementationClass); assertCreatesDefaultImplementation(conf); } private void testDefaultFactoryForWrongConfig(String value) { - Configuration conf = new Configuration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.set(configKeyForClassName(), value); assertCreatesDefaultImplementation(conf); @@ -133,7 +135,8 @@ private void testDefaultFactoryForWrongConfig(String value) { } } - private static void assertCreatesDefaultImplementation(Configuration conf) { + private static void assertCreatesDefaultImplementation( + OzoneConfiguration conf) { // given // conf @@ -171,15 +174,16 @@ private PrivateConstructor() { } } /** - * Spy factory to verify {@link SpaceUsageCheckFactory#create(Configuration)} + * Spy factory to verify + * {@link SpaceUsageCheckFactory#create(ConfigurationSource)} * properly configures it. 
*/ public static final class SpyFactory implements SpaceUsageCheckFactory { - private Configuration conf; + private ConfigurationSource conf; @Override - public SpaceUsageCheckFactory setConfiguration(Configuration config) { + public SpaceUsageCheckFactory setConfiguration(ConfigurationSource config) { this.conf = config; return this; } @@ -189,7 +193,7 @@ public SpaceUsageCheckParams paramsFor(File dir) { throw new UnsupportedOperationException(); } - public Configuration getConf() { + public ConfigurationSource getConf() { return conf; } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java index 6044666ce89..c8dfd2c8159 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java @@ -17,23 +17,6 @@ */ package org.apache.hadoop.hdds.scm.net; -import org.apache.hadoop.conf.Configuration; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; -import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; -import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.REGION_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.DATACENTER_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.NODEGROUP_SCHEMA; -import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; - -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -42,6 +25,18 @@ import java.util.Map; import java.util.Random; import java.util.stream.Collectors; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; + +import static org.apache.hadoop.hdds.scm.net.NetConstants.DATACENTER_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.NODEGROUP_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR; +import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.REGION_SCHEMA; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; +import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; @@ -49,9 +44,14 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.junit.Assume.assumeTrue; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; +import org.junit.runner.RunWith; import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import org.junit.runner.RunWith; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** Test the network topology functions. 
*/ @RunWith(Parameterized.class) @@ -221,7 +221,7 @@ public void testCreateInvalidTopology() { @Test public void testInitWithConfigFile() { ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); - Configuration conf = new Configuration(); + OzoneConfiguration conf = new OzoneConfiguration(); try { String filePath = classLoader.getResource( "./networkTopologyTestFiles/good.xml").getPath(); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java index 66980437276..ae971559b67 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java @@ -17,19 +17,19 @@ */ package org.apache.hadoop.hdds.scm.net; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_NODEGROUP; import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_RACK; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** Test the node schema loader. */ public class TestNodeSchemaManager { @@ -38,10 +38,10 @@ public class TestNodeSchemaManager { private ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); private NodeSchemaManager manager; - private Configuration conf; + private OzoneConfiguration conf; public TestNodeSchemaManager() { - conf = new Configuration(); + conf = new OzoneConfiguration(); String filePath = classLoader.getResource( "./networkTopologyTestFiles/good.xml").getPath(); conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath); diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index 98690c46e50..105e8ac8f09 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -33,7 +33,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - + + org.slf4j + slf4j-api + org.apache.hadoop hadoop-hdds-test-utils diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java new file mode 100644 index 00000000000..48f06eaa257 --- /dev/null +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.conf; + +import javax.annotation.PostConstruct; +import java.lang.reflect.Field; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; + +/** + * Reflection utilities for configuration injection. + */ +public final class ConfigurationReflectionUtil { + + private ConfigurationReflectionUtil() { + } + + public static void injectConfiguration( + ConfigurationSource configuration, + Class configurationClass, + T configObject, String prefix) { + injectConfigurationToObject(configuration, configurationClass, configObject, + prefix); + Class superClass = configurationClass.getSuperclass(); + while (superClass != null) { + injectConfigurationToObject(configuration, superClass, configObject, + prefix); + superClass = superClass.getSuperclass(); + } + } + + public static void injectConfigurationToObject(ConfigurationSource from, + Class configurationClass, + T configuration, + String prefix) { + for (Field field : configurationClass.getDeclaredFields()) { + if (field.isAnnotationPresent(Config.class)) { + + String fieldLocation = + configurationClass + "." + field.getName(); + + Config configAnnotation = field.getAnnotation(Config.class); + + String key = prefix + "." + configAnnotation.key(); + + ConfigType type = configAnnotation.type(); + + if (type == ConfigType.AUTO) { + type = detectConfigType(field.getType(), fieldLocation); + } + + //Note: default value is handled by ozone-default.xml. Here we can + //use any default. + try { + switch (type) { + case STRING: + forcedFieldSet(field, configuration, from.get(key)); + break; + case INT: + forcedFieldSet(field, configuration, from.getInt(key, 0)); + break; + case BOOLEAN: + forcedFieldSet(field, configuration, from.getBoolean(key, false)); + break; + case LONG: + forcedFieldSet(field, configuration, from.getLong(key, 0)); + break; + case TIME: + forcedFieldSet(field, configuration, + from.getTimeDuration(key, "0s", configAnnotation.timeUnit())); + break; + default: + throw new ConfigurationException( + "Unsupported ConfigType " + type + " on " + fieldLocation); + } + } catch (IllegalAccessException e) { + throw new ConfigurationException( + "Can't inject configuration to " + fieldLocation, e); + } + + } + } + } + + /** + * Set the value of one field even if it's private. 
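A hedged sketch of how this injection is meant to be driven, via ConfigurationSource#getObject introduced later in this patch: the @ConfigGroup prefix and the @Config key are joined with a dot, the value is read from the source with the matching typed getter, and the private field is set reflectively. The ExampleConfig class is hypothetical, and the real @Config annotation may require additional attributes (for example defaultValue, description or tags) that are omitted here:

    // Hypothetical config group, for illustration only.
    @ConfigGroup(prefix = "ozone.example")
    public class ExampleConfig {

      // Resolved as "ozone.example.buffer.size"; with ConfigType.AUTO the type
      // would instead be detected from the field type by detectConfigType.
      @Config(key = "buffer.size", type = ConfigType.INT)
      private int bufferSize;

      public int getBufferSize() {
        return bufferSize;
      }
    }

    // Usage, mirroring DatanodeStateMachine later in this patch:
    //   ExampleConfig ex = conf.getObject(ExampleConfig.class);
    //   int size = ex.getBufferSize();   // injected by ConfigurationReflectionUtil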
+ */ + private static void forcedFieldSet(Field field, T object, Object value) + throws IllegalAccessException { + boolean accessChanged = false; + if (!field.isAccessible()) { + field.setAccessible(true); + accessChanged = true; + } + field.set(object, value); + if (accessChanged) { + field.setAccessible(false); + } + } + + private static ConfigType detectConfigType(Class parameterType, + String methodLocation) { + ConfigType type; + if (parameterType == String.class) { + type = ConfigType.STRING; + } else if (parameterType == Integer.class || parameterType == int.class) { + type = ConfigType.INT; + } else if (parameterType == Long.class || parameterType == long.class) { + type = ConfigType.LONG; + } else if (parameterType == Boolean.class + || parameterType == boolean.class) { + type = ConfigType.BOOLEAN; + } else { + throw new ConfigurationException( + "Unsupported configuration type " + parameterType + " in " + + methodLocation); + } + return type; + } + + public static void callPostConstruct(Class configurationClass, + T configObject) { + for (Method method : configurationClass.getMethods()) { + if (method.isAnnotationPresent(PostConstruct.class)) { + try { + method.invoke(configObject); + } catch (IllegalAccessException ex) { + throw new IllegalArgumentException( + "@PostConstruct method in " + configurationClass + + " is not accessible"); + } catch (InvocationTargetException e) { + if (e.getCause() instanceof RuntimeException) { + throw (RuntimeException) e.getCause(); + } else { + throw new IllegalArgumentException( + "@PostConstruct can't be executed on " + configurationClass + + " after configObject " + + "injection", e); + } + } + } + } + } +} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java new file mode 100644 index 00000000000..bc20f68ad15 --- /dev/null +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java @@ -0,0 +1,291 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.conf; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +/** + * Lightweight interface to defined the contract of the Configuration objects. + */ +public interface ConfigurationSource { + + String[] EMPTY_STRING_ARRAY = {}; + + String get(String key); + + Collection getConfigKeys(); + + @Deprecated + //TODO: user read only configs and don't use it to store actual port + // numbers. + void set(String key, String value); + + default String get(String key, String defaultValue) { + String value = get(key); + return value != null ? value : defaultValue; + } + + default int getInt(String key, int defaultValue) { + String value = get(key); + return value != null ? Integer.parseInt(value) : defaultValue; + } + + /** + * Get the value of the name property as a set of comma-delimited + * int values. + *

+ * If no such property exists, an empty array is returned. + * + * @param name property name + * @return property value interpreted as an array of comma-delimited + * int values + */ + default int[] getInts(String name) { + String[] strings = getTrimmedStrings(name); + int[] ints = new int[strings.length]; + for (int i = 0; i < strings.length; i++) { + ints[i] = Integer.parseInt(strings[i]); + } + return ints; + } + + default long getLong(String key, long defaultValue) { + String value = get(key); + return value != null ? Long.parseLong(value) : defaultValue; + } + + default boolean getBoolean(String key, boolean defaultValue) { + String value = get(key); + return value != null ? Boolean.parseBoolean(value) : defaultValue; + } + + default float getFloat(String key, float defaultValue) { + String value = get(key); + return value != null ? Float.parseFloat(value) : defaultValue; + } + + default double getDouble(String key, double defaultValue) { + String value = get(key); + return value != null ? Double.parseDouble(value) : defaultValue; + } + + default String getTrimmed(String key) { + String value = get(key); + return value != null ? value.trim() : null; + } + + default String getTrimmed(String key, String defaultValue) { + String value = getTrimmed(key); + return value != null ? value : defaultValue; + } + + default String[] getTrimmedStrings(String name) { + String valueString = get(name); + if (null == valueString || valueString.trim().isEmpty()) { + return EMPTY_STRING_ARRAY; + } + + return valueString.trim().split("\\s*[,\n]\\s*"); + } + + default char[] getPassword(String key) throws IOException { + return get(key).toCharArray(); + } + + default Map getPropsWithPrefix(String confPrefix) { + Map configMap = new HashMap<>(); + for (String name : getConfigKeys()) { + if (name.startsWith(confPrefix)) { + String value = get(name); + String keyName = name.substring(confPrefix.length()); + configMap.put(keyName, value); + } + } + return configMap; + } + + /** + * Create a Configuration object and inject the required configuration values. + * + * @param configurationClass The class where the fields are annotated with + * the configuration. + * @return Initiated java object where the config fields are injected. + */ + default T getObject(Class configurationClass) { + + T configObject; + + try { + configObject = configurationClass.newInstance(); + } catch (InstantiationException | IllegalAccessException e) { + throw new ConfigurationException( + "Configuration class can't be created: " + configurationClass, e); + } + ConfigGroup configGroup = + configurationClass.getAnnotation(ConfigGroup.class); + + String prefix = configGroup.prefix(); + + ConfigurationReflectionUtil + .injectConfiguration(this, configurationClass, configObject, + prefix); + + ConfigurationReflectionUtil + .callPostConstruct(configurationClass, configObject); + + return configObject; + + } + + /** + * Get the value of the name property as a Class + * implementing the interface specified by xface. + *

+ * If no such property is specified, then defaultValue is + * returned. + *

+ * An exception is thrown if the returned class does not implement the named + * interface. + * + * @param name the class name. + * @param defaultValue default value. + * @param xface the interface implemented by the named class. + * @return property value as a Class, + * or defaultValue. + */ + default Class getClass(String name, + Class defaultValue, + Class xface) { + try { + Class theClass = getClass(name, defaultValue); + if (theClass != null && !xface.isAssignableFrom(theClass)) { + throw new RuntimeException(theClass + " not " + xface.getName()); + } else if (theClass != null) { + return theClass.asSubclass(xface); + } else { + return null; + } + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + /** + * Get the value of the name property as a Class. + * If no such property is specified, then defaultValue is + * returned. + * + * @param name the class name. + * @param defaultValue default value. + * @return property value as a Class, + * or defaultValue. + */ + default Class getClass(String name, Class defaultValue) { + String valueString = getTrimmed(name); + if (valueString == null) { + return defaultValue; + } + try { + return Class.forName(name); + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + } + + default Class[] getClasses(String name, Class... defaultValue) { + String valueString = get(name); + if (null == valueString) { + return defaultValue; + } + String[] classnames = getTrimmedStrings(name); + try { + Class[] classes = new Class[classnames.length]; + for (int i = 0; i < classnames.length; i++) { + classes[i] = Class.forName(classnames[i]); + } + return classes; + } catch (ClassNotFoundException e) { + throw new RuntimeException(e); + } + } + + default long getTimeDuration(String name, long defaultValue, + TimeUnit unit) { + String vStr = get(name); + if (null == vStr) { + return defaultValue; + } else { + return TimeDurationUtil.getTimeDurationHelper(name, vStr, unit); + } + } + + default long getTimeDuration(String name, String defaultValue, + TimeUnit unit) { + String vStr = get(name); + if (null == vStr) { + return TimeDurationUtil.getTimeDurationHelper(name, defaultValue, unit); + } else { + return TimeDurationUtil.getTimeDurationHelper(name, vStr, unit); + } + } + + default double getStorageSize(String name, String defaultValue, + StorageUnit targetUnit) { + String vString = get(name); + if (vString == null) { + vString = defaultValue; + } + + // Please note: There is a bit of subtlety here. If the user specifies + // the default unit as "1GB", but the requested unit is MB, we will return + // the format in MB even thought the default string is specified in GB. + + // Converts a string like "1GB" to to unit specified in targetUnit. + + StorageSize measure = StorageSize.parse(vString); + + double byteValue = measure.getUnit().toBytes(measure.getValue()); + return targetUnit.fromBytes(byteValue); + } + + default Collection getTrimmedStringCollection(String key) { + return Arrays.asList(getTrimmedStrings(key)); + } + + /** + * Return value matching this enumerated type. + * Note that the returned value is trimmed by this method. + * + * @param name Property name + * @param defaultValue Value returned if no mapping exists + * @throws IllegalArgumentException If mapping is illegal for the type + * provided + */ + default > T getEnum(String name, T defaultValue) { + final String val = getTrimmed(name); + return null == val + ? 
defaultValue + : Enum.valueOf(defaultValue.getDeclaringClass(), val); + } + +} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java new file mode 100644 index 00000000000..15016be40aa --- /dev/null +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
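The default methods above mean an implementation only has to provide get(String) and getConfigKeys(); every typed accessor derives from the raw string value. A few illustrative calls, with made-up keys, assuming only "a.enabled" = "true" and "a.hosts" = "h1, h2" are present in the source:

    conf.getBoolean("a.enabled", false);   // true, parsed from the raw string
    conf.getInt("a.port", 9876);           // 9876, key absent so the default is returned
    conf.getTrimmedStrings("a.hosts");     // ["h1", "h2"], split on commas and newlines

    // getEnum trims the value and maps it with Enum.valueOf; TimeUnit is used here
    // purely as an example enum. ChunkLayOutVersion.getConfiguredVersion later in
    // this patch relies on the same method for the chunk layout key.
    conf.getEnum("a.unit", TimeUnit.SECONDS);   // SECONDS, key absent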

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.conf; + +import java.util.Locale; + +/** + * A class that contains the numeric value and the unit of measure. + */ +public class StorageSize { + private final StorageUnit unit; + private final double value; + + /** + * Constucts a Storage Measure, which contains the value and the unit of + * measure. + * + * @param unit - Unit of Measure + * @param value - Numeric value. + */ + public StorageSize(StorageUnit unit, double value) { + this.unit = unit; + this.value = value; + } + + private static void checkState(boolean state, String errorString) { + if (!state) { + throw new IllegalStateException(errorString); + } + } + + public static StorageSize parse(String value) { + checkState(value != null && value.length() > 0, "value cannot be blank"); + String sanitizedValue = value.trim().toLowerCase(Locale.ENGLISH); + StorageUnit parsedUnit = null; + for (StorageUnit unit : StorageUnit.values()) { + if (sanitizedValue.endsWith(unit.getShortName()) || + sanitizedValue.endsWith(unit.getLongName()) || + sanitizedValue.endsWith(unit.getSuffixChar())) { + parsedUnit = unit; + break; + } + } + + if (parsedUnit == null) { + throw new IllegalArgumentException(value + " is not in expected format." + + "Expected format is . e.g. 1000MB"); + } + + String suffix = ""; + boolean found = false; + + // We are trying to get the longest match first, so the order of + // matching is getLongName, getShortName and then getSuffixChar. + if (!found && sanitizedValue.endsWith(parsedUnit.getLongName())) { + found = true; + suffix = parsedUnit.getLongName(); + } + + if (!found && sanitizedValue.endsWith(parsedUnit.getShortName())) { + found = true; + suffix = parsedUnit.getShortName(); + } + + if (!found && sanitizedValue.endsWith(parsedUnit.getSuffixChar())) { + found = true; + suffix = parsedUnit.getSuffixChar(); + } + + checkState(found, "Something is wrong, we have to find a " + + "match. Internal error."); + + String valString = + sanitizedValue.substring(0, value.length() - suffix.length()); + return new StorageSize(parsedUnit, Double.parseDouble(valString)); + + } + + public StorageUnit getUnit() { + return unit; + } + + public double getValue() { + return value; + } + +} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageUnit.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageUnit.java new file mode 100644 index 00000000000..6678aa48839 --- /dev/null +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageUnit.java @@ -0,0 +1,529 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.conf; + +import java.math.BigDecimal; +import java.math.RoundingMode; + +/** + * Enum to represent storage unit. + */ +public enum StorageUnit { + /* + We rely on BYTES being the last to get longest matching short names first. + The short name of bytes is b and it will match with other longer names. + + if we change this order, the corresponding code in + Configuration#parseStorageUnit needs to be changed too, since values() + call returns the Enums in declared order and we depend on it. + */ + + EB { + @Override + public double toBytes(double value) { + return multiply(value, EXABYTES); + } + + @Override + public double toKBs(double value) { + return multiply(value, EXABYTES / KILOBYTES); + } + + @Override + public double toMBs(double value) { + return multiply(value, EXABYTES / MEGABYTES); + } + + @Override + public double toGBs(double value) { + return multiply(value, EXABYTES / GIGABYTES); + } + + @Override + public double toTBs(double value) { + return multiply(value, EXABYTES / TERABYTES); + } + + @Override + public double toPBs(double value) { + return multiply(value, EXABYTES / PETABYTES); + } + + @Override + public double toEBs(double value) { + return value; + } + + @Override + public String getLongName() { + return "exabytes"; + } + + @Override + public String getShortName() { + return "eb"; + } + + @Override + public String getSuffixChar() { + return "e"; + } + + @Override + public double getDefault(double value) { + return toEBs(value); + } + + @Override + public double fromBytes(double value) { + return divide(value, EXABYTES); + } + }, + PB { + @Override + public double toBytes(double value) { + return multiply(value, PETABYTES); + } + + @Override + public double toKBs(double value) { + return multiply(value, PETABYTES / KILOBYTES); + } + + @Override + public double toMBs(double value) { + return multiply(value, PETABYTES / MEGABYTES); + } + + @Override + public double toGBs(double value) { + return multiply(value, PETABYTES / GIGABYTES); + } + + @Override + public double toTBs(double value) { + return multiply(value, PETABYTES / TERABYTES); + } + + @Override + public double toPBs(double value) { + return value; + } + + @Override + public double toEBs(double value) { + return divide(value, EXABYTES / PETABYTES); + } + + @Override + public String getLongName() { + return "petabytes"; + } + + @Override + public String getShortName() { + return "pb"; + } + + @Override + public String getSuffixChar() { + return "p"; + } + + @Override + public double getDefault(double value) { + return toPBs(value); + } + + @Override + public double fromBytes(double value) { + return divide(value, PETABYTES); + } + }, + TB { + @Override + public double toBytes(double value) { + return multiply(value, TERABYTES); + } + + @Override + public double toKBs(double value) { + return multiply(value, TERABYTES / KILOBYTES); + } + + @Override + public double toMBs(double value) { + return multiply(value, TERABYTES / MEGABYTES); + } + + @Override + public double toGBs(double value) { + return multiply(value, TERABYTES / GIGABYTES); + } + + @Override + public double toTBs(double value) { + return value; + } + + @Override + public double toPBs(double 
value) { + return divide(value, PETABYTES / TERABYTES); + } + + @Override + public double toEBs(double value) { + return divide(value, EXABYTES / TERABYTES); + } + + @Override + public String getLongName() { + return "terabytes"; + } + + @Override + public String getShortName() { + return "tb"; + } + + @Override + public String getSuffixChar() { + return "t"; + } + + @Override + public double getDefault(double value) { + return toTBs(value); + } + + @Override + public double fromBytes(double value) { + return divide(value, TERABYTES); + } + }, + GB { + @Override + public double toBytes(double value) { + return multiply(value, GIGABYTES); + } + + @Override + public double toKBs(double value) { + return multiply(value, GIGABYTES / KILOBYTES); + } + + @Override + public double toMBs(double value) { + return multiply(value, GIGABYTES / MEGABYTES); + } + + @Override + public double toGBs(double value) { + return value; + } + + @Override + public double toTBs(double value) { + return divide(value, TERABYTES / GIGABYTES); + } + + @Override + public double toPBs(double value) { + return divide(value, PETABYTES / GIGABYTES); + } + + @Override + public double toEBs(double value) { + return divide(value, EXABYTES / GIGABYTES); + } + + @Override + public String getLongName() { + return "gigabytes"; + } + + @Override + public String getShortName() { + return "gb"; + } + + @Override + public String getSuffixChar() { + return "g"; + } + + @Override + public double getDefault(double value) { + return toGBs(value); + } + + @Override + public double fromBytes(double value) { + return divide(value, GIGABYTES); + } + }, + MB { + @Override + public double toBytes(double value) { + return multiply(value, MEGABYTES); + } + + @Override + public double toKBs(double value) { + return multiply(value, MEGABYTES / KILOBYTES); + } + + @Override + public double toMBs(double value) { + return value; + } + + @Override + public double toGBs(double value) { + return divide(value, GIGABYTES / MEGABYTES); + } + + @Override + public double toTBs(double value) { + return divide(value, TERABYTES / MEGABYTES); + } + + @Override + public double toPBs(double value) { + return divide(value, PETABYTES / MEGABYTES); + } + + @Override + public double toEBs(double value) { + return divide(value, EXABYTES / MEGABYTES); + } + + @Override + public String getLongName() { + return "megabytes"; + } + + @Override + public String getShortName() { + return "mb"; + } + + @Override + public String getSuffixChar() { + return "m"; + } + + @Override + public double fromBytes(double value) { + return divide(value, MEGABYTES); + } + + @Override + public double getDefault(double value) { + return toMBs(value); + } + }, + KB { + @Override + public double toBytes(double value) { + return multiply(value, KILOBYTES); + } + + @Override + public double toKBs(double value) { + return value; + } + + @Override + public double toMBs(double value) { + return divide(value, MEGABYTES / KILOBYTES); + } + + @Override + public double toGBs(double value) { + return divide(value, GIGABYTES / KILOBYTES); + } + + @Override + public double toTBs(double value) { + return divide(value, TERABYTES / KILOBYTES); + } + + @Override + public double toPBs(double value) { + return divide(value, PETABYTES / KILOBYTES); + } + + @Override + public double toEBs(double value) { + return divide(value, EXABYTES / KILOBYTES); + } + + @Override + public String getLongName() { + return "kilobytes"; + } + + @Override + public String getShortName() { + return "kb"; + } + + @Override + public 
String getSuffixChar() { + return "k"; + } + + @Override + public double getDefault(double value) { + return toKBs(value); + } + + @Override + public double fromBytes(double value) { + return divide(value, KILOBYTES); + } + }, + BYTES { + @Override + public double toBytes(double value) { + return value; + } + + @Override + public double toKBs(double value) { + return divide(value, KILOBYTES); + } + + @Override + public double toMBs(double value) { + return divide(value, MEGABYTES); + } + + @Override + public double toGBs(double value) { + return divide(value, GIGABYTES); + } + + @Override + public double toTBs(double value) { + return divide(value, TERABYTES); + } + + @Override + public double toPBs(double value) { + return divide(value, PETABYTES); + } + + @Override + public double toEBs(double value) { + return divide(value, EXABYTES); + } + + @Override + public String getLongName() { + return "bytes"; + } + + @Override + public String getShortName() { + return "b"; + } + + @Override + public String getSuffixChar() { + return "b"; + } + + @Override + public double getDefault(double value) { + return toBytes(value); + } + + @Override + public double fromBytes(double value) { + return value; + } + }; + + private static final double BYTE = 1L; + private static final double KILOBYTES = BYTE * 1024L; + private static final double MEGABYTES = KILOBYTES * 1024L; + private static final double GIGABYTES = MEGABYTES * 1024L; + private static final double TERABYTES = GIGABYTES * 1024L; + private static final double PETABYTES = TERABYTES * 1024L; + private static final double EXABYTES = PETABYTES * 1024L; + private static final int PRECISION = 4; + + /** + * Using BigDecimal to avoid issues with overflow and underflow. + * + * @param value - value + * @param divisor - divisor. + * @return -- returns a double that represents this value + */ + private static double divide(double value, double divisor) { + BigDecimal val = new BigDecimal(value); + BigDecimal bDivisor = new BigDecimal(divisor); + return val.divide(bDivisor).setScale(PRECISION, RoundingMode.HALF_UP) + .doubleValue(); + } + + /** + * Using BigDecimal so we can throw if we are overflowing the Long.Max. + * + * @param first - First Num. + * @param second - Second Num. 
+ * @return Returns a double + */ + private static double multiply(double first, double second) { + BigDecimal firstVal = new BigDecimal(first); + BigDecimal secondVal = new BigDecimal(second); + return firstVal.multiply(secondVal) + .setScale(PRECISION, RoundingMode.HALF_UP).doubleValue(); + } + + public abstract double toBytes(double value); + + public abstract double toKBs(double value); + + public abstract double toMBs(double value); + + public abstract double toGBs(double value); + + public abstract double toTBs(double value); + + public abstract double toPBs(double value); + + public abstract double toEBs(double value); + + public abstract String getLongName(); + + public abstract String getShortName(); + + public abstract String getSuffixChar(); + + public abstract double getDefault(double value); + + public abstract double fromBytes(double value); + + public String toString() { + return getLongName(); + } + +} diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java new file mode 100644 index 00000000000..2bbdecff5ed --- /dev/null +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java @@ -0,0 +1,154 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
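A worked example of the size handling above, assuming a value of "2gb"; the long name, short name and single-letter suffix are all accepted, case-insensitively, and conversions round to four decimal places:

    StorageSize size = StorageSize.parse("2gb");
    size.getUnit();                          // StorageUnit.GB
    size.getValue();                         // 2.0
    size.getUnit().toBytes(2.0);             // 2147483648.0, i.e. 2 * 1024^3
    StorageUnit.MB.fromBytes(2147483648.0);  // 2048.0

    // ConfigurationSource#getStorageSize combines the two steps; the key is made up:
    //   conf.getStorageSize("a.chunk.size", "2GB", StorageUnit.MB)  ->  2048.0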

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.conf; + +import java.util.concurrent.TimeUnit; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Utility to handle time duration. + */ +public final class TimeDurationUtil { + + public static final Logger LOG = + LoggerFactory.getLogger(TimeDurationUtil.class); + + private TimeDurationUtil() { + } + + /** + * Return time duration in the given time unit. Valid units are encoded in + * properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds + * (ms), seconds (s), minutes (m), hours (h), and days (d). + * + * @param name Property name + * @param vStr The string value with time unit suffix to be converted. + * @param unit Unit to convert the stored property, if it exists. + */ + public static long getTimeDurationHelper(String name, String vStr, + TimeUnit unit) { + vStr = vStr.trim(); + vStr = vStr.toLowerCase(); + ParsedTimeDuration vUnit = ParsedTimeDuration.unitFor(vStr); + if (null == vUnit) { + LOG.warn("No unit for " + name + "(" + vStr + ") assuming " + unit); + vUnit = ParsedTimeDuration.unitFor(unit); + } else { + vStr = vStr.substring(0, vStr.lastIndexOf(vUnit.suffix())); + } + + long raw = Long.parseLong(vStr); + long converted = unit.convert(raw, vUnit.unit()); + if (vUnit.unit().convert(converted, unit) < raw) { + LOG.warn("Possible loss of precision converting " + vStr + + vUnit.suffix() + " to " + unit + " for " + name); + } + return converted; + } + + enum ParsedTimeDuration { + NS { + TimeUnit unit() { + return TimeUnit.NANOSECONDS; + } + + String suffix() { + return "ns"; + } + }, + US { + TimeUnit unit() { + return TimeUnit.MICROSECONDS; + } + + String suffix() { + return "us"; + } + }, + MS { + TimeUnit unit() { + return TimeUnit.MILLISECONDS; + } + + String suffix() { + return "ms"; + } + }, + S { + TimeUnit unit() { + return TimeUnit.SECONDS; + } + + String suffix() { + return "s"; + } + }, + M { + TimeUnit unit() { + return TimeUnit.MINUTES; + } + + String suffix() { + return "m"; + } + }, + H { + TimeUnit unit() { + return TimeUnit.HOURS; + } + + String suffix() { + return "h"; + } + }, + D { + TimeUnit unit() { + return TimeUnit.DAYS; + } + + String suffix() { + return "d"; + } + }; + + abstract TimeUnit unit(); + + abstract String suffix(); + + static ParsedTimeDuration unitFor(String s) { + for (ParsedTimeDuration ptd : values()) { + // iteration order is in decl order, so SECONDS matched last + if (s.endsWith(ptd.suffix())) { + return ptd; + } + } + return null; + } + + static ParsedTimeDuration unitFor(TimeUnit unit) { + for (ParsedTimeDuration ptd : values()) { + if (ptd.unit() == unit) { + return ptd; + } + } + return null; + } + } +} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java index fe2d06550ff..74155c2244d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java @@ -19,8 +19,8 @@ import java.io.IOException; -import org.apache.hadoop.conf.Configuration; 
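A few illustrative conversions for the suffix handling above; the property names are made up, and ConfigurationSource#getTimeDuration delegates here whenever the key is set:

    // "5m" carries its own unit and is converted into the requested one.
    TimeDurationUtil.getTimeDurationHelper("x.interval", "5m", TimeUnit.SECONDS);    // 300

    // No suffix: the requested unit is assumed, after a warning is logged.
    TimeDurationUtil.getTimeDurationHelper("x.interval", "5", TimeUnit.SECONDS);     // 5

    // Down-converting logs a possible precision loss and truncates.
    TimeDurationUtil.getTimeDurationHelper("x.timeout", "1500ms", TimeUnit.SECONDS); // 1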
import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.http.BaseHttpServer; /** @@ -30,7 +30,7 @@ */ public class HddsDatanodeHttpServer extends BaseHttpServer { - public HddsDatanodeHttpServer(Configuration conf) throws IOException { + public HddsDatanodeHttpServer(OzoneConfiguration conf) throws IOException { super(conf, "hddsDatanode"); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 6a6d7187142..e811a883ed5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -29,11 +29,11 @@ import java.util.concurrent.atomic.AtomicBoolean; import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; @@ -357,7 +357,7 @@ private void getSCMSignedCert(OzoneConfiguration config) { * @param config * */ @VisibleForTesting - public PKCS10CertificationRequest getCSR(Configuration config) + public PKCS10CertificationRequest getCSR(ConfigurationSource config) throws IOException { CertificateSignRequest.Builder builder = dnCertClient.getCSRBuilder(); KeyPair keyPair = new KeyPair(dnCertClient.getPublicKey(), diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java index 3de95796dda..e07f626de0b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.container.common.helpers; import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.metrics2.MetricsSystem; @@ -86,7 +86,7 @@ public ContainerMetrics(int[] intervals) { } } - public static ContainerMetrics create(Configuration conf) { + public static ContainerMetrics create(ConfigurationSource conf) { MetricsSystem ms = DefaultMetricsSystem.instance(); // Percentile measurement is off by default, by watching no intervals int[] intervals = diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java index a5bcc2270bb..055f4482c5b 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java @@ -18,20 +18,20 @@ package org.apache.hadoop.ozone.container.common.impl; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.util.List; + import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.util.List; +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableList; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNABLE_TO_FIND_DATA_DIR; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Defines layout versions for the Chunks. @@ -94,7 +94,8 @@ public static List getAllVersions() { /** * @return the latest version. */ - public static ChunkLayOutVersion getConfiguredVersion(Configuration conf) { + public static ChunkLayOutVersion getConfiguredVersion( + ConfigurationSource conf) { try { return conf.getEnum(ScmConfigKeys.OZONE_SCM_CHUNK_LAYOUT_KEY, DEFAULT_LAYOUT); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index 0ea8189798a..3f07c953378 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -23,7 +23,7 @@ import java.util.Optional; import java.util.Set; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -77,7 +77,7 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor { private static final AuditLogger AUDIT = new AuditLogger(AuditLoggerType.DNLOGGER); private final Map handlers; - private final Configuration conf; + private final ConfigurationSource conf; private final ContainerSet containerSet; private final VolumeSet volumeSet; private final StateContext context; @@ -92,7 +92,7 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor { * Constructs an OzoneContainer that receives calls from * XceiverServerHandler. 
*/ - public HddsDispatcher(Configuration config, ContainerSet contSet, + public HddsDispatcher(ConfigurationSource config, ContainerSet contSet, VolumeSet volumes, Map handlers, StateContext context, ContainerMetrics metrics, TokenVerifier tokenVerifier) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java index 9f5b9f73b44..4ba7572a094 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java @@ -23,7 +23,7 @@ import java.io.OutputStream; import java.util.function.Consumer; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType; @@ -45,7 +45,7 @@ @SuppressWarnings("visibilitymodifier") public abstract class Handler { - protected final Configuration conf; + protected final ConfigurationSource conf; protected final ContainerSet containerSet; protected final VolumeSet volumeSet; protected String scmID; @@ -53,7 +53,7 @@ public abstract class Handler { protected String datanodeId; private Consumer icrSender; - protected Handler(Configuration config, String datanodeId, + protected Handler(ConfigurationSource config, String datanodeId, ContainerSet contSet, VolumeSet volumeSet, ContainerMetrics containerMetrics, Consumer icrSender) { @@ -66,7 +66,7 @@ protected Handler(Configuration config, String datanodeId, } public static Handler getHandlerForContainerType( - final ContainerType containerType, final Configuration config, + final ContainerType containerType, final ConfigurationSource config, final String datanodeId, final ContainerSet contSet, final VolumeSet volumeSet, final ContainerMetrics metrics, Consumer icrSender) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java index 536d4cc06b3..bb43d0f3bba 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java @@ -17,20 +17,21 @@ package org.apache.hadoop.ozone.container.common.report; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.util.concurrent.HadoopExecutors; + import com.google.common.base.Preconditions; import com.google.common.util.concurrent.ThreadFactoryBuilder; import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.util.concurrent.HadoopExecutors; import org.slf4j.Logger; import 
org.slf4j.LoggerFactory; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; - /** * ReportManager is responsible for managing all the {@link ReportPublisher} * and also provides {@link ScheduledExecutorService} to ReportPublisher @@ -89,7 +90,7 @@ public void shutdown() { * @param conf - Conf * @return builder - Builder. */ - public static Builder newBuilder(Configuration conf) { + public static Builder newBuilder(ConfigurationSource conf) { return new Builder(conf); } @@ -103,7 +104,7 @@ public static final class Builder { private ReportPublisherFactory publisherFactory; - private Builder(Configuration conf) { + private Builder(ConfigurationSource conf) { this.reportPublishers = new ArrayList<>(); this.publisherFactory = new ReportPublisherFactory(conf); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java index a5e04aa5b05..685a1d993f9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java @@ -17,30 +17,29 @@ package org.apache.hadoop.ozone.container.common.report; -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configurable; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine.DatanodeStates; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.IOException; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; + +import com.google.protobuf.GeneratedMessage; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * Abstract class responsible for scheduling the reports based on the * configured interval. All the ReportPublishers should extend this class. 
*/ public abstract class ReportPublisher - implements Configurable, Runnable { + implements Runnable { private static final Logger LOG = LoggerFactory.getLogger( ReportPublisher.class); - private Configuration config; + private ConfigurationSource config; private StateContext context; private ScheduledExecutorService executor; @@ -58,13 +57,11 @@ public void init(StateContext stateContext, getReportFrequency(), TimeUnit.MILLISECONDS); } - @Override - public void setConf(Configuration conf) { + public void setConf(ConfigurationSource conf) { config = conf; } - @Override - public Configuration getConf() { + public ConfigurationSource getConf() { return config; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java index 1c456a0519b..45336918be6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java @@ -17,27 +17,23 @@ package org.apache.hadoop.ozone.container.common.report; -import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.util.ReflectionUtils; - import java.util.HashMap; import java.util.Map; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; + +import com.google.protobuf.GeneratedMessage; + /** * Factory class to construct {@link ReportPublisher} for a report. 
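Because ReportPublisher now only implements Runnable, the factory in the next hunk stops going through ReflectionUtils and wires the source in explicitly. A hedged sketch of the resulting usage; the report type passed in is only an example:

    ReportPublisherFactory factory = new ReportPublisherFactory(conf);

    // Looks up the publisher class registered for the report type, instantiates it
    // with newInstance() and hands it the ConfigurationSource via setConf().
    ReportPublisher publisher = factory.getPublisherFor(ContainerReportsProto.class);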
*/ public class ReportPublisherFactory { - private final Configuration conf; + private final ConfigurationSource conf; private final Map, Class> report2publisher; @@ -46,7 +42,7 @@ public class ReportPublisherFactory { * * @param conf Configuration to be passed to the {@link ReportPublisher} */ - public ReportPublisherFactory(Configuration conf) { + public ReportPublisherFactory(ConfigurationSource conf) { this.conf = conf; this.report2publisher = new HashMap<>(); @@ -73,7 +69,13 @@ public ReportPublisher getPublisherFor( if (publisherClass == null) { throw new RuntimeException("No publisher found for report " + report); } - return ReflectionUtils.newInstance(publisherClass, conf); + try { + ReportPublisher reportPublisher = publisherClass.newInstance(); + reportPublisher.setConf(conf); + return reportPublisher; + } catch (Exception e) { + throw new RuntimeException(e); + } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java index dcde6fe7b2c..e41a5376e4e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java @@ -24,34 +24,23 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.ozone.HddsDatanodeStopService; import org.apache.hadoop.ozone.container.common.report.ReportManager; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .CloseContainerCommandHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .ClosePipelineCommandHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .CommandDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .CreatePipelineCommandHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .DeleteBlocksCommandHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler 
- .DeleteContainerCommandHandler; -import org.apache.hadoop.ozone.container.common.statemachine.commandhandler - .ReplicateContainerCommandHandler; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CloseContainerCommandHandler; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ClosePipelineCommandHandler; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CommandDispatcher; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CreatePipelineCommandHandler; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteContainerCommandHandler; +import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ReplicateContainerCommandHandler; import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.container.replication.ContainerReplicator; @@ -76,7 +65,7 @@ public class DatanodeStateMachine implements Closeable { static final Logger LOG = LoggerFactory.getLogger(DatanodeStateMachine.class); private final ExecutorService executorService; - private final Configuration conf; + private final ConfigurationSource conf; private final SCMConnectionManager connectionManager; private StateContext context; private final OzoneContainer container; @@ -106,11 +95,10 @@ public class DatanodeStateMachine implements Closeable { * enabled */ public DatanodeStateMachine(DatanodeDetails datanodeDetails, - Configuration conf, CertificateClient certClient, + ConfigurationSource conf, CertificateClient certClient, HddsDatanodeStopService hddsDatanodeStopService) throws IOException { - OzoneConfiguration ozoneConf = new OzoneConfiguration(conf); DatanodeConfiguration dnConf = - ozoneConf.getObject(DatanodeConfiguration.class); + conf.getObject(DatanodeConfiguration.class); this.hddsDatanodeStopService = hddsDatanodeStopService; this.conf = conf; @@ -126,7 +114,7 @@ public DatanodeStateMachine(DatanodeDetails datanodeDetails, constructionLock.writeLock().lock(); try { container = new OzoneContainer(this.datanodeDetails, - ozoneConf, context, certClient); + conf, context, certClient); } finally { constructionLock.writeLock().unlock(); } @@ -208,7 +196,8 @@ private void start() throws IOException { // Start jvm monitor jvmPauseMonitor = new JvmPauseMonitor(); - jvmPauseMonitor.init(conf); + jvmPauseMonitor + .init(LegacyHadoopConfigurationSource.asHadoopConfiguration(conf)); jvmPauseMonitor.start(); while (context.getState() != DatanodeStates.SHUTDOWN) { @@ -456,7 +445,7 @@ public boolean isDaemonStopped() { * * @param config */ - private void initCommandHandlerThread(Configuration config) { + private void initCommandHandlerThread(ConfigurationSource config) { /** * Task that periodically checks if we have any outstanding commands. 
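The JvmPauseMonitor change above is the bridge this patch uses whenever a legacy Hadoop API still expects org.apache.hadoop.conf.Configuration; SCMConnectionManager below applies the same pattern for the RPC setup. In sketch form:

    // ConfigurationSource is the Ozone-side abstraction; legacy components such as
    // JvmPauseMonitor, RPC and NetUtils still take a Hadoop Configuration, so the
    // source is wrapped on demand instead of being stored as a Configuration.
    Configuration hadoopConfig =
        LegacyHadoopConfigurationSource.asHadoopConfiguration(conf);

    jvmPauseMonitor.init(hadoopConfig);
    RPC.setProtocolEngine(hadoopConfig, StorageContainerDatanodeProtocolPB.class,
        ProtobufRpcEngine.class);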
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java index a500d4ad758..cd1a376b115 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.ozone.container.common.statemachine; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.ozone.protocol.VersionResponse; import org.apache.hadoop.ozone.protocolPB .StorageContainerDatanodeProtocolClientSideTranslatorPB; @@ -46,7 +46,7 @@ public class EndpointStateMachine private final AtomicLong missedCount; private final InetSocketAddress address; private final Lock lock; - private final Configuration conf; + private final ConfigurationSource conf; private EndPointStates state; private VersionResponse version; private ZonedDateTime lastSuccessfulHeartbeat; @@ -59,7 +59,7 @@ public class EndpointStateMachine */ public EndpointStateMachine(InetSocketAddress address, StorageContainerDatanodeProtocolClientSideTranslatorPB endPoint, - Configuration conf) { + ConfigurationSource conf) { this.endPoint = endPoint; this.missedCount = new AtomicLong(0); this.address = address; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java index 814eeb4a21d..ebc53c990f5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java @@ -16,22 +16,6 @@ */ package org.apache.hadoop.ozone.container.common.statemachine; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.retry.RetryPolicies; -import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.protocolPB.ReconDatanodeProtocolPB; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerDatanodeProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; -import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import javax.management.ObjectName; import java.io.Closeable; import java.io.IOException; @@ -45,9 +29,25 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; +import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.retry.RetryPolicies; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; +import 
org.apache.hadoop.metrics2.util.MBeans; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.ozone.protocolPB.ReconDatanodeProtocolPB; +import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolClientSideTranslatorPB; +import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; +import org.apache.hadoop.security.UserGroupInformation; + import static java.util.Collections.unmodifiableList; -import static org.apache.hadoop.hdds.utils.HddsServerUtil - .getScmRpcTimeOutInMilliseconds; +import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmRpcTimeOutInMilliseconds; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * SCMConnectionManager - Acts as a class that manages the membership @@ -62,10 +62,10 @@ public class SCMConnectionManager private final Map scmMachines; private final int rpcTimeout; - private final Configuration conf; + private final ConfigurationSource conf; private ObjectName jmxBean; - public SCMConnectionManager(Configuration conf) { + public SCMConnectionManager(ConfigurationSource conf) { this.mapLock = new ReentrantReadWriteLock(); Long timeOut = getScmRpcTimeOutInMilliseconds(conf); this.rpcTimeout = timeOut.intValue(); @@ -82,7 +82,7 @@ public SCMConnectionManager(Configuration conf) { * * @return ozoneConfig. */ - public Configuration getConf() { + public ConfigurationSource getConf() { return conf; } @@ -139,7 +139,11 @@ public void addSCMServer(InetSocketAddress address) throws IOException { return; } - RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class, + Configuration hadoopConfig = + LegacyHadoopConfigurationSource.asHadoopConfiguration(this.conf); + RPC.setProtocolEngine( + hadoopConfig, + StorageContainerDatanodeProtocolPB.class, ProtobufRpcEngine.class); long version = RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class); @@ -150,8 +154,8 @@ public void addSCMServer(InetSocketAddress address) throws IOException { StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy( StorageContainerDatanodeProtocolPB.class, version, - address, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), getRpcTimeout(), + address, UserGroupInformation.getCurrentUser(), hadoopConfig, + NetUtils.getDefaultSocketFactory(hadoopConfig), getRpcTimeout(), retryPolicy).getProxy(); StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient = @@ -159,7 +163,7 @@ public void addSCMServer(InetSocketAddress address) throws IOException { rpcProxy); EndpointStateMachine endPoint = - new EndpointStateMachine(address, rpcClient, conf); + new EndpointStateMachine(address, rpcClient, this.conf); endPoint.setPassive(false); scmMachines.put(address, endPoint); } finally { @@ -181,8 +185,9 @@ public void addReconServer(InetSocketAddress address) throws IOException { "Ignoring the request."); return; } - - RPC.setProtocolEngine(conf, ReconDatanodeProtocolPB.class, + Configuration hadoopConfig = + LegacyHadoopConfigurationSource.asHadoopConfiguration(this.conf); + RPC.setProtocolEngine(hadoopConfig, ReconDatanodeProtocolPB.class, ProtobufRpcEngine.class); long version = RPC.getProtocolVersion(ReconDatanodeProtocolPB.class); @@ -192,8 +197,8 @@ public void addReconServer(InetSocketAddress address) throws IOException { 60000, TimeUnit.MILLISECONDS); ReconDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy( ReconDatanodeProtocolPB.class, version, - address, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), getRpcTimeout(), + 
address, UserGroupInformation.getCurrentUser(), hadoopConfig, + NetUtils.getDefaultSocketFactory(hadoopConfig), getRpcTimeout(), retryPolicy).getProxy(); StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient = diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index 04502b6225f..73bea256c6f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -16,54 +16,45 @@ */ package org.apache.hadoop.ozone.container.common.statemachine; -import com.google.common.base.Preconditions; -import com.google.protobuf.GeneratedMessage; - import java.net.InetSocketAddress; +import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; import java.util.Map; +import java.util.Queue; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Consumer; -import org.apache.commons.collections.CollectionUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatus.Status; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .InitDatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .RunningDatanodeState; +import org.apache.hadoop.ozone.container.common.states.datanode.InitDatanodeState; +import org.apache.hadoop.ozone.container.common.states.datanode.RunningDatanodeState; import org.apache.hadoop.ozone.protocol.commands.CommandStatus; -import org.apache.hadoop.ozone.protocol.commands - .DeleteBlockCommandStatus.DeleteBlockCommandStatusBuilder; +import org.apache.hadoop.ozone.protocol.commands.DeleteBlockCommandStatus.DeleteBlockCommandStatusBuilder; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; +import com.google.common.base.Preconditions; +import com.google.protobuf.GeneratedMessage; import static java.lang.Math.min; +import org.apache.commons.collections.CollectionUtils; import static 
org.apache.hadoop.hdds.utils.HddsServerUtil.getScmHeartbeatInterval; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.LinkedList; -import java.util.List; -import java.util.Queue; -import java.util.ArrayList; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.function.Consumer; - /** * Current Context of State Machine. */ @@ -75,7 +66,7 @@ public class StateContext { private final Lock lock; private final DatanodeStateMachine parent; private final AtomicLong stateExecutionCount; - private final Configuration conf; + private final ConfigurationSource conf; private final Set endpoints; private final Map> reports; private final Map> containerActions; @@ -98,8 +89,9 @@ public class StateContext { * @param state - State * @param parent Parent State Machine */ - public StateContext(Configuration conf, DatanodeStateMachine.DatanodeStates - state, DatanodeStateMachine parent) { + public StateContext(ConfigurationSource conf, + DatanodeStateMachine.DatanodeStates + state, DatanodeStateMachine parent) { this.conf = conf; this.state = state; this.parent = parent; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java index 49a8fd97597..c60c1129f56 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java @@ -16,24 +16,26 @@ */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; +import java.util.Collection; +import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto. - StorageContainerDatanodeProtocolProtos.CreatePipelineCommandProto; -import org.apache.hadoop.hdds.protocol.proto. 
- StorageContainerDatanodeProtocolProtos.SCMCommandProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CreatePipelineCommandProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.transport.server - .XceiverServerSpi; +import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.util.Time; + import org.apache.ratis.client.RaftClient; import org.apache.ratis.protocol.AlreadyExistsException; import org.apache.ratis.protocol.RaftGroup; @@ -42,11 +44,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Collection; -import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; - /** * Handler for create pipeline command received from SCM. */ @@ -56,14 +53,14 @@ public class CreatePipelineCommandHandler implements CommandHandler { LoggerFactory.getLogger(CreatePipelineCommandHandler.class); private final AtomicLong invocationCount = new AtomicLong(0); - private final Configuration conf; + private final ConfigurationSource conf; private long totalTime; /** * Constructs a createPipelineCommand handler. 
*/ - public CreatePipelineCommandHandler(Configuration conf) { + public CreatePipelineCommandHandler(ConfigurationSource conf) { this.conf = conf; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index eac26f1bd32..20982572986 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -17,7 +17,7 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.SCMCommandProto; @@ -68,13 +68,13 @@ public class DeleteBlocksCommandHandler implements CommandHandler { LoggerFactory.getLogger(DeleteBlocksCommandHandler.class); private final ContainerSet containerSet; - private final Configuration conf; + private final ConfigurationSource conf; private int invocationCount; private long totalTime; private boolean cmdExecuted; public DeleteBlocksCommandHandler(ContainerSet cset, - Configuration conf) { + ConfigurationSource conf) { this.containerSet = cset; this.conf = conf; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java index a96032e5c4c..41958bf0c37 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java @@ -18,7 +18,7 @@ import java.util.List; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; @@ -46,12 +46,12 @@ public class ReplicateContainerCommandHandler implements CommandHandler { private long totalTime; - private Configuration conf; + private ConfigurationSource conf; private ReplicationSupervisor supervisor; public ReplicateContainerCommandHandler( - Configuration conf, + ConfigurationSource conf, ReplicationSupervisor supervisor) { this.conf = conf; this.supervisor = supervisor; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java index a73f1c5054e..ba898db751c 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java @@ -16,21 +16,6 @@ */ package org.apache.hadoop.ozone.container.common.states.datanode; -import com.google.common.base.Strings; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.utils.HddsServerUtil; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; @@ -42,8 +27,21 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; +import org.apache.hadoop.ozone.container.common.states.DatanodeState; + +import com.google.common.base.Strings; import static org.apache.hadoop.hdds.HddsUtils.getReconAddresses; import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Init Datanode State is the task that gets run when we are in Init State. 
@@ -52,7 +50,7 @@ public class InitDatanodeState implements DatanodeState, Callable { static final Logger LOG = LoggerFactory.getLogger(InitDatanodeState.class); private final SCMConnectionManager connectionManager; - private final Configuration conf; + private final ConfigurationSource conf; private final StateContext context; private Future result; @@ -63,7 +61,7 @@ public class InitDatanodeState implements DatanodeState, * @param connectionManager - Connection Manager * @param context - Current Context */ - public InitDatanodeState(Configuration conf, + public InitDatanodeState(ConfigurationSource conf, SCMConnectionManager connectionManager, StateContext context) { this.conf = conf; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java index 779b1a21fbc..1ecfbf954fb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.ozone.container.common.states.datanode; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine.EndPointStates; @@ -51,14 +51,14 @@ public class RunningDatanodeState implements DatanodeState { static final Logger LOG = LoggerFactory.getLogger(RunningDatanodeState.class); private final SCMConnectionManager connectionManager; - private final Configuration conf; + private final ConfigurationSource conf; private final StateContext context; private CompletionService ecs; /** Cache the end point task per end point per end point state. 
*/ private Map>> endpointTasks; - public RunningDatanodeState(Configuration conf, + public RunningDatanodeState(ConfigurationSource conf, SCMConnectionManager connectionManager, StateContext context) { this.connectionManager = connectionManager; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index fb1d1af428e..494ccd9bf35 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -21,7 +21,7 @@ import com.google.common.base.Preconditions; import com.google.protobuf.Descriptors; import com.google.protobuf.GeneratedMessage; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto @@ -78,7 +78,7 @@ public class HeartbeatEndpointTask static final Logger LOG = LoggerFactory.getLogger(HeartbeatEndpointTask.class); private final EndpointStateMachine rpcEndpoint; - private final Configuration conf; + private final ConfigurationSource conf; private DatanodeDetailsProto datanodeDetailsProto; private StateContext context; private int maxContainerActionsPerHB; @@ -90,7 +90,7 @@ public class HeartbeatEndpointTask * @param conf Config. */ public HeartbeatEndpointTask(EndpointStateMachine rpcEndpoint, - Configuration conf, StateContext context) { + ConfigurationSource conf, StateContext context) { this.rpcEndpoint = rpcEndpoint; this.conf = conf; this.context = context; @@ -344,7 +344,7 @@ private void processResponse(SCMHeartbeatResponseProto response, */ public static class Builder { private EndpointStateMachine endPointStateMachine; - private Configuration conf; + private ConfigurationSource conf; private DatanodeDetails datanodeDetails; private StateContext context; @@ -371,7 +371,7 @@ public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) { * @param config - config * @return Builder */ - public Builder setConfig(Configuration config) { + public Builder setConfig(ConfigurationSource config) { this.conf = config; return this; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java index 92e57437251..be95f011407 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java @@ -19,7 +19,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; @@ -49,7 +49,7 @@ 
public final class RegisterEndpointTask implements static final Logger LOG = LoggerFactory.getLogger(RegisterEndpointTask.class); private final EndpointStateMachine rpcEndPoint; - private final Configuration conf; + private final ConfigurationSource conf; private Future result; private DatanodeDetails datanodeDetails; private final OzoneContainer datanodeContainerManager; @@ -64,7 +64,7 @@ public final class RegisterEndpointTask implements */ @VisibleForTesting public RegisterEndpointTask(EndpointStateMachine rpcEndPoint, - Configuration conf, OzoneContainer ozoneContainer, + ConfigurationSource conf, OzoneContainer ozoneContainer, StateContext context) { this.rpcEndPoint = rpcEndPoint; this.conf = conf; @@ -163,7 +163,7 @@ public static Builder newBuilder() { */ public static class Builder { private EndpointStateMachine endPointStateMachine; - private Configuration conf; + private ConfigurationSource conf; private DatanodeDetails datanodeDetails; private OzoneContainer container; private StateContext context; @@ -191,7 +191,7 @@ public Builder setEndpointStateMachine(EndpointStateMachine rpcEndPoint) { * @param config - config * @return Builder. */ - public Builder setConfig(Configuration config) { + public Builder setConfig(ConfigurationSource config) { this.conf = config; return this; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java index 0834f7714e4..6c53756b055 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java @@ -16,26 +16,25 @@ */ package org.apache.hadoop.ozone.container.common.states.endpoint; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; +import java.io.IOException; +import java.util.Map; +import java.util.concurrent.Callable; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.protocol.VersionResponse; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; + +import com.google.common.base.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.Callable; - /** * Task that returns version. 
*/ @@ -44,11 +43,11 @@ public class VersionEndpointTask implements public static final Logger LOG = LoggerFactory.getLogger(VersionEndpointTask .class); private final EndpointStateMachine rpcEndPoint; - private final Configuration configuration; + private final ConfigurationSource configuration; private final OzoneContainer ozoneContainer; public VersionEndpointTask(EndpointStateMachine rpcEndPoint, - Configuration conf, OzoneContainer container) { + ConfigurationSource conf, OzoneContainer container) { this.rpcEndPoint = rpcEndPoint; this.configuration = conf; this.ozoneContainer = container; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java index 441d9c8a056..9adabf4611b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java @@ -18,19 +18,21 @@ package org.apache.hadoop.ozone.container.common.transport.server; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.PipelineReport; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.container.common.helpers. - StorageContainerException; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.tracing.GrpcServerInterceptor; @@ -39,6 +41,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; +import com.google.common.base.Preconditions; import io.opentracing.Scope; import org.apache.ratis.thirdparty.io.grpc.BindableService; import org.apache.ratis.thirdparty.io.grpc.Server; @@ -50,12 +53,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - /** * Creates a Grpc server endpoint that acts as the communication layer for * Ozone containers. 
@@ -77,7 +74,8 @@ public final class XceiverServerGrpc implements XceiverServerSpi { * * @param conf - Configuration */ - public XceiverServerGrpc(DatanodeDetails datanodeDetails, Configuration conf, + public XceiverServerGrpc(DatanodeDetails datanodeDetails, + ConfigurationSource conf, ContainerDispatcher dispatcher, CertificateClient caClient, BindableService... additionalServices) { Preconditions.checkNotNull(conf); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 8b7e71014e7..c529e7b465e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -18,14 +18,36 @@ package org.apache.hadoop.ozone.container.common.transport.server.ratis; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Semaphore; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; - +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Container2BCSIDMapProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto; import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; @@ -33,11 +55,18 @@ import org.apache.hadoop.hdds.utils.Cache; import org.apache.hadoop.hdds.utils.ResourceLimitCache; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; +import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.util.Time; -import 
org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.apache.ratis.proto.RaftProtos.LogEntryProto; import org.apache.ratis.proto.RaftProtos.RaftPeerRole; +import org.apache.ratis.proto.RaftProtos.RoleInfoProto; +import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftGroupMemberId; import org.apache.ratis.protocol.RaftPeerId; @@ -46,54 +75,18 @@ import org.apache.ratis.server.impl.RaftServerProxy; import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.server.raftlog.RaftLog; -import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo; -import org.apache.ratis.thirdparty.com.google.protobuf - .InvalidProtocolBufferException; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos. - Container2BCSIDMapProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerCommandResponseProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .WriteChunkRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadChunkRequestProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ReadChunkResponseProto; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.RaftClientRequest; import org.apache.ratis.server.storage.RaftStorage; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.apache.ratis.proto.RaftProtos.RoleInfoProto; -import org.apache.ratis.proto.RaftProtos.LogEntryProto; import org.apache.ratis.statemachine.StateMachineStorage; import org.apache.ratis.statemachine.TransactionContext; import org.apache.ratis.statemachine.impl.BaseStateMachine; import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; +import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.apache.ratis.thirdparty.com.google.protobuf.TextFormat; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Semaphore; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.stream.Collectors; -import java.util.concurrent.Executors; -import java.io.FileOutputStream; -import java.io.FileInputStream; -import java.io.OutputStream; - /** A {@link org.apache.ratis.statemachine.StateMachine} for containers. 
* * The stateMachine is responsible for handling different types of container @@ -161,7 +154,7 @@ public class ContainerStateMachine extends BaseStateMachine { public ContainerStateMachine(RaftGroupId gid, ContainerDispatcher dispatcher, ContainerController containerController, List chunkExecutors, - XceiverServerRatis ratisServer, Configuration conf) { + XceiverServerRatis ratisServer, ConfigurationSource conf) { this.gid = gid; this.dispatcher = dispatcher; this.containerController = containerController; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index a3dc7501247..d3bdb3f027a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -18,78 +18,86 @@ package org.apache.hadoop.ozone.container.common.transport.server.ratis; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.collect.ImmutableList; -import com.google.common.util.concurrent.ThreadFactoryBuilder; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.LinkedBlockingDeque; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ThreadPoolExecutor; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport; import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; -import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.ozone.OzoneConfigKeys; - import org.apache.hadoop.ozone.OzoneConsts; 
-import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; - -import io.opentracing.Scope; import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.collect.ImmutableList; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import io.opentracing.Scope; import org.apache.ratis.RaftConfigKeys; -import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.grpc.GrpcConfigKeys; import org.apache.ratis.grpc.GrpcFactory; import org.apache.ratis.grpc.GrpcTlsConfig; import org.apache.ratis.netty.NettyConfigKeys; -import org.apache.ratis.protocol.*; +import org.apache.ratis.proto.RaftProtos; +import org.apache.ratis.proto.RaftProtos.RoleInfoProto; +import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.GroupInfoReply; +import org.apache.ratis.protocol.GroupInfoRequest; +import org.apache.ratis.protocol.GroupManagementRequest; +import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.RaftClientReply; +import org.apache.ratis.protocol.RaftClientRequest; +import org.apache.ratis.protocol.RaftGroup; +import org.apache.ratis.protocol.RaftGroupId; +import org.apache.ratis.protocol.RaftGroupMemberId; +import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.protocol.StateMachineException; import org.apache.ratis.rpc.RpcType; import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.RaftServerConfigKeys; -import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.proto.RaftProtos.RoleInfoProto; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.server.impl.RaftServerProxy; +import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.util.SizeInBytes; import org.apache.ratis.util.TimeDuration; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.IOException; -import java.util.Collection; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Collections; -import java.util.Set; -import java.util.UUID; -import java.util.ArrayList; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.LinkedBlockingDeque; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.ThreadPoolExecutor; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - /** * Creates a ratis server endpoint that acts as the communication layer for * Ozone containers. 
@@ -113,7 +121,7 @@ private static long nextCallId() { private long nodeFailureTimeoutMs; private boolean isStarted = false; private DatanodeDetails datanodeDetails; - private final OzoneConfiguration conf; + private final ConfigurationSource conf; // TODO: Remove the gids set when Ratis supports an api to query active // pipelines private final Set raftGids = new HashSet<>(); @@ -123,7 +131,7 @@ private static long nextCallId() { private XceiverServerRatis(DatanodeDetails dd, int port, ContainerDispatcher dispatcher, ContainerController containerController, - StateContext context, GrpcTlsConfig tlsConfig, OzoneConfiguration conf) + StateContext context, GrpcTlsConfig tlsConfig, ConfigurationSource conf) throws IOException { this.conf = conf; Objects.requireNonNull(dd, "id == null"); @@ -363,7 +371,7 @@ private void setPendingRequestsLimits(RaftProperties properties) { } public static XceiverServerRatis newXceiverServerRatis( - DatanodeDetails datanodeDetails, OzoneConfiguration ozoneConf, + DatanodeDetails datanodeDetails, ConfigurationSource ozoneConf, ContainerDispatcher dispatcher, ContainerController containerController, CertificateClient caClient, StateContext context) throws IOException { int localPort = ozoneConf.getInt( @@ -754,7 +762,7 @@ private void sendPipelineReport() { } private static List createChunkExecutors( - Configuration conf) { + ConfigurationSource conf) { // TODO create single pool with N threads if using non-incremental chunks final int threadCount = conf.getInt( OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java index 04eb30fe2a8..d2d29018b32 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java @@ -18,21 +18,22 @@ package org.apache.hadoop.ozone.container.common.utils; +import java.io.File; +import java.io.IOException; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.MetadataStore; +import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; +import org.apache.hadoop.ozone.OzoneConfigKeys; + import com.google.common.base.Preconditions; import org.apache.commons.collections.MapIterator; import org.apache.commons.collections.map.LRUMap; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.IOException; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - /** * container cache is a LRUMap that maintains the DB handles. */ @@ -57,7 +58,8 @@ private ContainerCache(int maxSize, float loadFactor, boolean * @param conf - Configuration. * @return A instance of {@link ContainerCache}. 
*/ - public synchronized static ContainerCache getInstance(Configuration conf) { + public synchronized static ContainerCache getInstance( + ConfigurationSource conf) { if (cache == null) { int cacheSize = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE, OzoneConfigKeys.OZONE_CONTAINER_CACHE_DEFAULT); @@ -111,7 +113,7 @@ protected boolean removeLRU(LinkEntry entry) { * @return ReferenceCountedDB. */ public ReferenceCountedDB getDB(long containerID, String containerDBType, - String containerDBPath, Configuration conf) + String containerDBPath, ConfigurationSource conf) throws IOException { Preconditions.checkState(containerID >= 0, "Container ID cannot be negative."); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 0eee4846e86..c3a5a4160e0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -19,11 +19,16 @@ package org.apache.hadoop.ozone.container.common.volume; import javax.annotation.Nullable; +import java.io.File; +import java.io.IOException; +import java.util.Objects; +import java.util.Properties; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicLong; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; @@ -31,21 +36,15 @@ import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion; import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile; import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; - import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.hadoop.hdds.annotation.InterfaceStability; + +import com.google.common.base.Preconditions; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.IOException; -import java.util.Objects; -import java.util.Properties; -import java.util.UUID; -import java.util.concurrent.atomic.AtomicLong; - /** * HddsVolume represents volume in a datanode. {@link MutableVolumeSet} * maintains a list of HddsVolumes, one for each volume in the Datanode. 
@@ -109,7 +108,7 @@ public VolumeCheckResult check(@Nullable Boolean unused) throws Exception { */ public static class Builder { private final String volumeRootStr; - private Configuration conf; + private ConfigurationSource conf; private StorageType storageType; private String datanodeUuid; @@ -121,7 +120,7 @@ public Builder(String rootDirStr) { this.volumeRootStr = rootDirStr; } - public Builder conf(Configuration config) { + public Builder conf(ConfigurationSource config) { this.conf = config; return this; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java index ce5d16b7960..9240a85d36d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java @@ -32,7 +32,7 @@ import java.util.concurrent.atomic.AtomicLong; import com.google.common.util.concurrent.MoreExecutors; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; @@ -91,7 +91,7 @@ public class HddsVolumeChecker { * @param conf Configuration object. * @param timer {@link Timer} object used for throttling checks. */ - public HddsVolumeChecker(Configuration conf, Timer timer) + public HddsVolumeChecker(ConfigurationSource conf, Timer timer) throws DiskErrorException { maxAllowedTimeForCheckMs = conf.getTimeDuration( DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 101e6804e54..fe29b8b2f72 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -33,9 +33,9 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; @@ -65,7 +65,7 @@ public class MutableVolumeSet implements VolumeSet { private static final Logger LOG = LoggerFactory.getLogger(MutableVolumeSet.class); - private Configuration conf; + private ConfigurationSource conf; /** * Maintains a map of all active volumes in the DataNode. 
@@ -106,12 +106,13 @@ public class MutableVolumeSet implements VolumeSet { private final HddsVolumeChecker volumeChecker; private Runnable failedVolumeListener; - public MutableVolumeSet(String dnUuid, Configuration conf) + public MutableVolumeSet(String dnUuid, ConfigurationSource conf) throws IOException { this(dnUuid, null, conf); } - public MutableVolumeSet(String dnUuid, String clusterID, Configuration conf) + public MutableVolumeSet(String dnUuid, String clusterID, + ConfigurationSource conf) throws IOException { this.datanodeUuid = dnUuid; this.clusterID = clusterID; @@ -144,7 +145,7 @@ public void setFailedVolumeListener(Runnable runnable) { } @VisibleForTesting - HddsVolumeChecker getVolumeChecker(Configuration configuration) + HddsVolumeChecker getVolumeChecker(ConfigurationSource configuration) throws DiskChecker.DiskErrorException { return new HddsVolumeChecker(configuration, new Timer()); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java index 215d1e5b250..e0669c7ff5f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java @@ -18,17 +18,18 @@ package org.apache.hadoop.ozone.container.common.volume; -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.io.IOException; + +import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams; -import org.apache.hadoop.fs.StorageType; + +import com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.IOException; - /** * Stores information about a disk/volume. */ @@ -51,13 +52,13 @@ public final class VolumeInfo { * Builder for VolumeInfo. 
*/ public static class Builder { - private final Configuration conf; + private final ConfigurationSource conf; private final String rootDir; private SpaceUsageCheckFactory usageCheckFactory; private StorageType storageType; private long configuredCapacity; - public Builder(String root, Configuration config) { + public Builder(String root, ConfigurationSource config) { this.rootDir = root; this.conf = config; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 6317e6356fe..200bfe443c9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -28,18 +28,14 @@ import java.util.Map; import java.util.concurrent.locks.ReentrantReadWriteLock; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerDataProto; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ContainerType; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.DataTransferThrottler; import org.apache.hadoop.io.nativeio.NativeIO; @@ -50,35 +46,25 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker; import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; -import org.apache.hadoop.ozone.container.keyvalue.helpers - .KeyValueContainerLocationUtil; +import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException; import com.google.common.base.Preconditions; import org.apache.commons.io.FileUtils; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_ALREADY_EXISTS; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_FILES_CREATE_ERROR; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.CONTAINER_INTERNAL_ERROR; +import static 
org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_ALREADY_EXISTS; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_FILES_CREATE_ERROR; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_INTERNAL_ERROR; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_OPEN; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.DISK_OUT_OF_SPACE; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.ERROR_IN_COMPACT_DB; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.ERROR_IN_DB_SYNC; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.INVALID_CONTAINER_STATE; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.UNSUPPORTED_REQUEST; - -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DISK_OUT_OF_SPACE; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.ERROR_IN_COMPACT_DB; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.ERROR_IN_DB_SYNC; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_CONTAINER_STATE; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -96,9 +82,10 @@ public class KeyValueContainer implements Container { private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); private final KeyValueContainerData containerData; - private Configuration config; + private ConfigurationSource config; - public KeyValueContainer(KeyValueContainerData containerData, Configuration + public KeyValueContainer(KeyValueContainerData containerData, + ConfigurationSource ozoneConfig) { Preconditions.checkNotNull(containerData, "KeyValueContainerData cannot " + "be null"); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java index 1e53daa4012..95795e64c95 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java @@ -20,7 +20,7 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdfs.util.Canceler; import org.apache.hadoop.hdfs.util.DataTransferThrottler; @@ -62,11 +62,11 @@ public class KeyValueContainerCheck { private long containerID; private KeyValueContainerData onDiskContainerData; //loaded from fs/disk - private Configuration checkConfig; + private ConfigurationSource checkConfig; private String metadataPath; - public KeyValueContainerCheck(String metadataPath, Configuration conf, + public KeyValueContainerCheck(String metadataPath, ConfigurationSource conf, long containerID) { Preconditions.checkArgument(metadataPath 
!= null); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 15177fca137..26e98c285e3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -30,9 +30,9 @@ import java.util.function.Consumer; import java.util.function.Function; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; @@ -68,7 +68,6 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.apache.hadoop.util.AutoCloseableLock; -import org.apache.hadoop.util.ReflectionUtils; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -114,16 +113,20 @@ public class KeyValueHandler extends Handler { // A lock that is held during container creation. private final AutoCloseableLock containerCreationLock; - public KeyValueHandler(Configuration config, String datanodeId, + public KeyValueHandler(ConfigurationSource config, String datanodeId, ContainerSet contSet, VolumeSet volSet, ContainerMetrics metrics, Consumer icrSender) { super(config, datanodeId, contSet, volSet, metrics, icrSender); containerType = ContainerType.KeyValueContainer; blockManager = new BlockManagerImpl(config); chunkManager = ChunkManagerFactory.createChunkManager(config); - volumeChoosingPolicy = ReflectionUtils.newInstance(conf.getClass( - HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy - .class, VolumeChoosingPolicy.class), conf); + try { + volumeChoosingPolicy = conf.getClass( + HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy + .class, VolumeChoosingPolicy.class).newInstance(); + } catch (Exception e) { + throw new RuntimeException(e); + } maxContainerSize = (long)config.getStorageSize( ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index 7ceb622eb43..99ead01a7f0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -18,21 +18,19 @@ package org.apache.hadoop.ozone.container.keyvalue.helpers; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import 
org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import java.io.IOException; - -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.NO_SUCH_BLOCK; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .Result.UNABLE_TO_READ_METADATA_DB; +import com.google.common.base.Preconditions; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNABLE_TO_READ_METADATA_DB; /** * Utils functions to help block functions. @@ -55,7 +53,7 @@ private BlockUtils() { * @throws StorageContainerException */ public static ReferenceCountedDB getDB(KeyValueContainerData containerData, - Configuration conf) throws + ConfigurationSource conf) throws StorageContainerException { Preconditions.checkNotNull(containerData); ContainerCache cache = ContainerCache.getInstance(conf); @@ -78,8 +76,8 @@ public static ReferenceCountedDB getDB(KeyValueContainerData containerData, * @param container - Container data. * @param conf - Configuration. */ - public static void removeDB(KeyValueContainerData container, Configuration - conf) { + public static void removeDB(KeyValueContainerData container, + ConfigurationSource conf) { Preconditions.checkNotNull(container); ContainerCache cache = ContainerCache.getInstance(conf); Preconditions.checkNotNull(cache); @@ -103,7 +101,7 @@ public static void shutdownCache(ContainerCache cache) { * @param conf configuration. 
*/ public static void addDB(ReferenceCountedDB db, String containerDBPath, - Configuration conf) { + ConfigurationSource conf) { ContainerCache cache = ContainerCache.getInstance(conf); Preconditions.checkNotNull(cache); cache.addDB(containerDBPath, db); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index d4ea45950dd..9fda44bef52 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -25,7 +25,7 @@ import java.util.Map; import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; @@ -63,7 +63,7 @@ private KeyValueContainerUtil() { * @throws IOException */ public static void createContainerMetaData(File containerMetaDataPath, File - chunksPath, File dbFile, Configuration conf) throws IOException { + chunksPath, File dbFile, ConfigurationSource conf) throws IOException { Preconditions.checkNotNull(containerMetaDataPath); Preconditions.checkNotNull(conf); @@ -105,7 +105,7 @@ public static void createContainerMetaData(File containerMetaDataPath, File * @throws IOException */ public static void removeContainer(KeyValueContainerData containerData, - Configuration conf) + ConfigurationSource conf) throws IOException { Preconditions.checkNotNull(containerData); File containerMetaDataPath = new File(containerData @@ -132,7 +132,7 @@ public static void removeContainer(KeyValueContainerData containerData, * @throws IOException */ public static void parseKVContainerData(KeyValueContainerData kvContainerData, - Configuration config) throws IOException { + ConfigurationSource config) throws IOException { long containerID = kvContainerData.getContainerID(); File metadataPath = new File(kvContainerData.getMetadataPath()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 58a4e8b18b9..ee663d8ec40 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -20,7 +20,7 @@ import com.google.common.base.Preconditions; import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; @@ -57,7 +57,7 @@ public class BlockManagerImpl implements BlockManager { private static byte[] blockCommitSequenceIdKey = DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX); - private Configuration config; + private ConfigurationSource config; private static final String DB_NULL_ERR_MSG = "DB cannot be 
null here"; private static final String NO_SUCH_BLOCK_ERR_MSG = @@ -68,7 +68,7 @@ public class BlockManagerImpl implements BlockManager { * * @param conf - Ozone configuration */ - public BlockManagerImpl(Configuration conf) { + public BlockManagerImpl(ConfigurationSource conf) { Preconditions.checkNotNull(conf, "Config cannot be null"); this.config = conf; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java index c2d81b6ff31..6b3f58d5673 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.keyvalue.impl; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.slf4j.Logger; @@ -39,7 +39,7 @@ public final class ChunkManagerFactory { private ChunkManagerFactory() { } - public static ChunkManager createChunkManager(Configuration conf) { + public static ChunkManager createChunkManager(ConfigurationSource conf) { boolean sync = conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY, OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java index 798629ed3c0..efd16c626e2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java @@ -18,9 +18,29 @@ package org.apache.hadoop.ozone.container.keyvalue.statemachine.background; -import com.google.common.collect.Lists; +import java.io.File; +import java.io.IOException; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.hdds.utils.BackgroundService; +import org.apache.hadoop.hdds.utils.BackgroundTask; +import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; +import org.apache.hadoop.hdds.utils.BackgroundTaskResult; +import org.apache.hadoop.hdds.utils.BatchOperation; +import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import 
org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy; @@ -28,48 +48,21 @@ import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.util.ReflectionUtils; -import org.apache.ratis.thirdparty.com.google.protobuf - .InvalidProtocolBufferException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.util.Time; -import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.hdds.utils.BackgroundTask; -import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; -import org.apache.hadoop.hdds.utils.BackgroundTaskResult; -import org.apache.hadoop.hdds.utils.BatchOperation; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; + +import com.google.common.collect.Lists; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT; +import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.IOException; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT; - /** * A per-datanode container block deleting service takes in charge * of deleting staled ozone blocks. 
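The BlockDeletingService constructor hunk that follows mirrors the earlier KeyValueHandler change: ReflectionUtils.newInstance(...) is dropped because it injects a Hadoop Configuration into Configurable instances, and only a ConfigurationSource is in scope after this patch, so the configured class is created through its no-arg constructor instead. A condensed, illustrative sketch of that pattern is below; the helper name and generic signature are assumptions, not part of the patch.

```java
import org.apache.hadoop.hdds.conf.ConfigurationSource;

// Illustrative helper capturing the pattern used in KeyValueHandler and
// BlockDeletingService: resolve the configured implementation through
// ConfigurationSource#getClass and instantiate it reflectively.
final class ConfiguredInstances {

  private ConfiguredInstances() {
  }

  static <T> T newInstanceOf(ConfigurationSource conf, String key,
      Class<? extends T> defaultImpl, Class<T> iface) {
    try {
      // getClass(...) falls back to defaultImpl when the key is unset.
      return conf.getClass(key, defaultImpl, iface).newInstance();
    } catch (InstantiationException | IllegalAccessException e) {
      // As in the patch, misconfiguration surfaces as a runtime failure.
      throw new RuntimeException("Cannot instantiate value of " + key, e);
    }
  }
}
```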
@@ -82,7 +75,7 @@ public class BlockDeletingService extends BackgroundService { private OzoneContainer ozoneContainer; private ContainerDeletionChoosingPolicy containerDeletionPolicy; - private final Configuration conf; + private final ConfigurationSource conf; // Throttle number of blocks to delete per task, // set to 1 for testing @@ -98,14 +91,18 @@ public class BlockDeletingService extends BackgroundService { public BlockDeletingService(OzoneContainer ozoneContainer, long serviceInterval, long serviceTimeout, TimeUnit timeUnit, - Configuration conf) { + ConfigurationSource conf) { super("BlockDeletingService", serviceInterval, timeUnit, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); this.ozoneContainer = ozoneContainer; - containerDeletionPolicy = ReflectionUtils.newInstance(conf.getClass( - ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, - TopNOrderedContainerDeletionChoosingPolicy.class, - ContainerDeletionChoosingPolicy.class), conf); + try { + containerDeletionPolicy = conf.getClass( + ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, + TopNOrderedContainerDeletionChoosingPolicy.class, + ContainerDeletionChoosingPolicy.class).newInstance(); + } catch (Exception e) { + throw new RuntimeException(e); + } this.conf = conf; this.blockLimitPerTask = conf.getInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java index 297c1caf92e..e923d33c752 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java @@ -18,12 +18,15 @@ package org.apache.hadoop.ozone.container.ozoneimpl; -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import java.io.File; +import java.io.FileFilter; +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.scm.container.common.helpers - .StorageContainerException; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; +import org.apache.hadoop.hdds.utils.MetadataKeyFilters; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Storage; @@ -31,25 +34,22 @@ import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; import org.apache.hadoop.ozone.container.common.impl.ContainerData; +import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import 
org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; + +import com.google.common.base.Preconditions; +import com.google.common.primitives.Longs; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.FileFilter; -import java.io.IOException; -import java.util.List; - /** * Class used to read .container files from Volume and build container map. * @@ -81,12 +81,12 @@ public class ContainerReader implements Runnable { ContainerReader.class); private HddsVolume hddsVolume; private final ContainerSet containerSet; - private final OzoneConfiguration config; + private final ConfigurationSource config; private final File hddsVolumeDir; private final MutableVolumeSet volumeSet; ContainerReader(MutableVolumeSet volSet, HddsVolume volume, ContainerSet cset, - OzoneConfiguration conf) { + ConfigurationSource conf) { Preconditions.checkNotNull(volume); this.hddsVolume = volume; this.hddsVolumeDir = hddsVolume.getHddsRootDir(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index d82ca4f9e9f..bbbec25af78 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -26,7 +26,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Consumer; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; @@ -72,7 +72,7 @@ public class OzoneContainer { private final HddsDispatcher hddsDispatcher; private final Map handlers; - private final OzoneConfiguration config; + private final ConfigurationSource config; private final MutableVolumeSet volumeSet; private final ContainerSet containerSet; private final XceiverServerSpi writeChannel; @@ -90,7 +90,7 @@ public class OzoneContainer { * @throws DiskOutOfSpaceException * @throws IOException */ - public OzoneContainer(DatanodeDetails datanodeDetails, OzoneConfiguration + public OzoneContainer(DatanodeDetails datanodeDetails, ConfigurationSource conf, StateContext context, CertificateClient certClient) throws IOException { config = conf; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java index 37a44acf74c..d7666ea1127 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java @@ -24,7 +24,7 @@ import java.util.concurrent.CompletableFuture; import java.util.function.Function; -import 
org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -46,7 +46,7 @@ public class SimpleContainerDownloader implements ContainerDownloader { private final Path workingDirectory; - public SimpleContainerDownloader(Configuration conf) { + public SimpleContainerDownloader(ConfigurationSource conf) { String workDirString = conf.get(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java index 6f159b438ef..aabde54c3ec 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java @@ -17,33 +17,33 @@ package org.apache.hadoop.ozone.container.common; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.Random; +import java.util.UUID; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerDatanodeProtocolClientSideTranslatorPB; +import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; import org.apache.hadoop.security.UserGroupInformation; -import org.mockito.Mockito; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.Random; -import java.util.UUID; +import org.mockito.Mockito; /** * Helper utility to test containers. 
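The two test-utility hunks that follow bridge between the old and new configuration types in both directions: ContainerTestUtils wraps a Hadoop Configuration in a LegacyHadoopConfigurationSource before handing it to EndpointStateMachine, while SCMTestUtils unwraps a ConfigurationSource back into a Hadoop Configuration for RPC.setProtocolEngine, which still requires one. A minimal sketch of both conversions; the adapter class itself is illustrative, only the two LegacyHadoopConfigurationSource calls come from the patch.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;

// Illustrative adapter showing the two bridging directions used below.
final class ConfBridge {

  private ConfBridge() {
  }

  // Hadoop Configuration -> ConfigurationSource (e.g. for EndpointStateMachine).
  static ConfigurationSource toSource(Configuration hadoopConf) {
    return new LegacyHadoopConfigurationSource(hadoopConf);
  }

  // ConfigurationSource -> Hadoop Configuration (e.g. for RPC.setProtocolEngine).
  static Configuration toHadoop(ConfigurationSource source) {
    return LegacyHadoopConfigurationSource.asHadoopConfiguration(source);
  }
}
```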
@@ -77,7 +77,8 @@ public static EndpointStateMachine createEndpoint(Configuration conf, StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient = new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy); - return new EndpointStateMachine(address, rpcClient, conf); + return new EndpointStateMachine(address, rpcClient, + new LegacyHadoopConfigurationSource(conf)); } public static OzoneContainer getOzoneContainer( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java index b2d412ab203..f696ac3713d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java @@ -22,18 +22,20 @@ import java.net.ServerSocket; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; -import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory; +import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageContainerDatanodeProtocolService; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; +import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol; -import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB; import org.apache.hadoop.test.GenericTestUtils; @@ -85,11 +87,14 @@ private static RPC.Server startRpcServer(Configuration conf, /** * Start Datanode RPC server. */ - public static RPC.Server startScmRpcServer(Configuration configuration, + public static RPC.Server startScmRpcServer(ConfigurationSource configuration, StorageContainerDatanodeProtocol server, InetSocketAddress rpcServerAddresss, int handlerCount) throws IOException { - RPC.setProtocolEngine(configuration, + + Configuration hadoopConfig = + LegacyHadoopConfigurationSource.asHadoopConfiguration(configuration); + RPC.setProtocolEngine(hadoopConfig, StorageContainerDatanodeProtocolPB.class, ProtobufRpcEngine.class); @@ -99,7 +104,7 @@ public static RPC.Server startScmRpcServer(Configuration configuration, new StorageContainerDatanodeProtocolServerSideTranslatorPB( server, Mockito.mock(ProtocolMessageMetrics.class))); - RPC.Server scmServer = startRpcServer(configuration, rpcServerAddresss, + RPC.Server scmServer = startRpcServer(hadoopConfig, rpcServerAddresss, StorageContainerDatanodeProtocolPB.class, scmDatanodeService, handlerCount); @@ -133,20 +138,20 @@ public static OzoneConfiguration getOzoneConf() { } public static HddsProtos.ReplicationType getReplicationType( - Configuration conf) { + ConfigurationSource conf) { return isUseRatis(conf) ? 
HddsProtos.ReplicationType.RATIS : HddsProtos.ReplicationType.STAND_ALONE; } public static HddsProtos.ReplicationFactor getReplicationFactor( - Configuration conf) { + ConfigurationSource conf) { return isUseRatis(conf) ? HddsProtos.ReplicationFactor.THREE : HddsProtos.ReplicationFactor.ONE; } - private static boolean isUseRatis(Configuration c) { + private static boolean isUseRatis(ConfigurationSource c) { return c.getBoolean( ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 303ebd78e18..a19ad648af4 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -17,13 +17,22 @@ package org.apache.hadoop.ozone.container.common; -import com.google.common.collect.Lists; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdfs.DFSUtil; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.utils.BackgroundService; +import org.apache.hadoop.hdds.utils.MetadataKeyFilters; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.container.ContainerTestHelper; @@ -35,40 +44,31 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl; -import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.test.GenericTestUtils; import 
org.apache.hadoop.test.GenericTestUtils.LogCapturer; -import org.apache.hadoop.hdds.utils.BackgroundService; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; + +import com.google.common.collect.Lists; +import org.apache.commons.io.FileUtils; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import org.junit.AfterClass; import org.junit.Assert; -import org.junit.Test; import org.junit.BeforeClass; +import org.junit.Test; import org.junit.runner.RunWith; import org.junit.runners.Parameterized; - -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -119,7 +119,8 @@ public static void cleanup() throws IOException { * creates some fake chunk files for testing. */ private void createToDeleteBlocks(ContainerSet containerSet, - Configuration conf, int numOfContainers, int numOfBlocksPerContainer, + ConfigurationSource conf, int numOfContainers, + int numOfBlocksPerContainer, int numOfChunksPerBlock) throws IOException { for (int x = 0; x < numOfContainers; x++) { conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); @@ -195,7 +196,7 @@ private int getDeletedBlocksCount(ReferenceCountedDB db) throws IOException { @Test public void testBlockDeletion() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); ContainerSet containerSet = new ContainerSet(); @@ -250,7 +251,7 @@ public void testBlockDeletion() throws Exception { @Test @SuppressWarnings("java:S2699") // waitFor => assertion with timeout public void testShutdownService() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500, TimeUnit.MILLISECONDS); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); @@ -275,7 +276,7 @@ public void testShutdownService() throws Exception { @Test public void testBlockDeletionTimeout() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); ContainerSet containerSet = new ContainerSet(); @@ -336,7 +337,7 @@ public void testBlockDeletionTimeout() throws Exception { } private BlockDeletingServiceTestImpl getBlockDeletingService( - ContainerSet containerSet, Configuration conf) { + ContainerSet containerSet, ConfigurationSource conf) { OzoneContainer ozoneContainer = 
mockDependencies(containerSet); return new BlockDeletingServiceTestImpl(ozoneContainer, 1000, conf); } @@ -363,7 +364,7 @@ public void testContainerThrottle() throws Exception { // // Each time only 1 container can be processed, so each time // 1 block from 1 container can be deleted. - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); // Process 1 container per interval conf.set( ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, @@ -406,7 +407,7 @@ public void testBlockThrottle() throws Exception { // Each time containers can be all scanned, but only 2 blocks // per container can be actually deleted. So it requires 2 waves // to cleanup all blocks. - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); int blockLimitPerTask = 2; conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, blockLimitPerTask); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java index 0f3e7d12d22..65ae6ce5771 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java @@ -16,54 +16,48 @@ */ package org.apache.hadoop.ozone.container.common; -import com.google.common.collect.Maps; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .SCMConnectionManager; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager; import org.apache.hadoop.ozone.container.common.states.DatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .InitDatanodeState; -import org.apache.hadoop.ozone.container.common.states.datanode - .RunningDatanodeState; +import org.apache.hadoop.ozone.container.common.states.datanode.InitDatanodeState; +import org.apache.hadoop.ozone.container.common.states.datanode.RunningDatanodeState; import 
org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.concurrent.HadoopExecutors; + +import com.google.common.collect.Maps; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_RPC_TIMEOUT; import org.junit.After; import org.junit.Assert; +import static org.junit.Assert.assertTrue; import org.junit.Before; import org.junit.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT; -import static org.junit.Assert.assertTrue; - /** * Tests the datanode state machine class and its states. */ @@ -77,7 +71,7 @@ public class TestDatanodeStateMachine { private List scmServers; private List mockServers; private ExecutorService executorService; - private Configuration conf; + private OzoneConfiguration conf; private File testRoot; @Before @@ -403,7 +397,7 @@ public void testDatanodeStateMachineWithInvalidConfiguration() ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, "")); confList.forEach((entry) -> { - Configuration perTestConf = new Configuration(conf); + OzoneConfiguration perTestConf = new OzoneConfiguration(conf); perTestConf.setStrings(entry.getKey(), entry.getValue()); LOG.info("Test with {} = {}", entry.getKey(), entry.getValue()); try (DatanodeStateMachine stateMachine = new DatanodeStateMachine( diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java index 324ab71ed70..7b41d99278f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java @@ -18,9 +18,9 @@ package org.apache.hadoop.ozone.container.common.interfaces; -import com.google.common.collect.Maps; +import java.util.Map; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; @@ -29,10 +29,11 @@ import org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; +import com.google.common.collect.Maps; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -42,8 +43,6 
@@ import org.junit.rules.Timeout; import org.mockito.Mockito; -import java.util.Map; - /** * Tests Handler interface. */ @@ -51,7 +50,7 @@ public class TestHandler { @Rule public TestRule timeout = new Timeout(300000); - private Configuration conf; + private OzoneConfiguration conf; private HddsDispatcher dispatcher; private ContainerSet containerSet; private VolumeSet volumeSet; @@ -59,7 +58,7 @@ public class TestHandler { @Before public void setup() throws Exception { - this.conf = new Configuration(); + this.conf = new OzoneConfiguration(); this.containerSet = Mockito.mock(ContainerSet.class); this.volumeSet = Mockito.mock(MutableVolumeSet.class); DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java index aae388dd5a1..45e50dcba6c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java @@ -17,16 +17,15 @@ package org.apache.hadoop.ozone.container.common.report; -import org.apache.hadoop.conf.Configuration; +import java.util.concurrent.ScheduledExecutorService; + import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.concurrent.ScheduledExecutorService; +import org.junit.Test; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; +import org.mockito.Mockito; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -37,7 +36,7 @@ public class TestReportManager { @Test public void testReportManagerInit() { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); StateContext dummyContext = Mockito.mock(StateContext.class); ReportPublisher dummyPublisher = Mockito.mock(ReportPublisher.class); ReportManager.Builder builder = ReportManager.newBuilder(conf); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java index 03f0cd4d816..166aadf6dbb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java @@ -21,7 +21,7 @@ import com.google.protobuf.GeneratedMessage; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsIdFactory; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -51,7 +51,7 @@ */ public class TestReportPublisher { - private static Configuration config; + private static ConfigurationSource config; @BeforeClass public static void setup() { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java index f8c5fe5e275..e9a34c74ffb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java @@ -17,13 +17,11 @@ package org.apache.hadoop.ozone.container.common.report; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; + import org.junit.Assert; import org.junit.Rule; import org.junit.Test; @@ -39,7 +37,7 @@ public class TestReportPublisherFactory { @Test public void testGetContainerReportPublisher() { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); ReportPublisherFactory factory = new ReportPublisherFactory(conf); ReportPublisher publisher = factory .getPublisherFor(ContainerReportsProto.class); @@ -49,7 +47,7 @@ public void testGetContainerReportPublisher() { @Test public void testGetNodeReportPublisher() { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); ReportPublisherFactory factory = new ReportPublisherFactory(conf); ReportPublisher publisher = factory .getPublisherFor(NodeReportProto.class); @@ -59,7 +57,7 @@ public void testGetNodeReportPublisher() { @Test public void testInvalidReportPublisher() { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); ReportPublisherFactory factory = new ReportPublisherFactory(conf); exception.expect(RuntimeException.class); exception.expectMessage("No publisher found for report"); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java index 95ac87fba86..1c66d63c390 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java @@ -18,39 +18,29 @@ package org.apache.hadoop.ozone.container.common.states.endpoint; -import org.apache.hadoop.conf.Configuration; +import java.net.InetSocketAddress; +import java.util.UUID; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerAction; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.ContainerReportsProto; 
-import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; -import org.apache.hadoop.hdds.protocol.proto - .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine - .DatanodeStateMachine.DatanodeStates; -import org.apache.hadoop.ozone.container.common.statemachine - .EndpointStateMachine; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates; +import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; -import org.apache.hadoop.ozone.protocolPB - .StorageContainerDatanodeProtocolClientSideTranslatorPB; +import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolClientSideTranslatorPB; import org.junit.Assert; import org.junit.Test; import org.mockito.ArgumentCaptor; import org.mockito.Mockito; -import java.net.InetSocketAddress; -import java.util.UUID; - /** * This class tests the functionality of HeartbeatEndpointTask. 
*/ @@ -86,7 +76,7 @@ public void testheartbeatWithoutReports() throws Exception { @Test public void testheartbeatWithNodeReports() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); StateContext context = new StateContext(conf, DatanodeStates.RUNNING, Mockito.mock(DatanodeStateMachine.class)); @@ -118,7 +108,7 @@ public void testheartbeatWithNodeReports() throws Exception { @Test public void testheartbeatWithContainerReports() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); StateContext context = new StateContext(conf, DatanodeStates.RUNNING, Mockito.mock(DatanodeStateMachine.class)); @@ -150,7 +140,7 @@ public void testheartbeatWithContainerReports() throws Exception { @Test public void testheartbeatWithCommandStatusReports() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); StateContext context = new StateContext(conf, DatanodeStates.RUNNING, Mockito.mock(DatanodeStateMachine.class)); @@ -182,7 +172,7 @@ public void testheartbeatWithCommandStatusReports() throws Exception { @Test public void testheartbeatWithContainerActions() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); StateContext context = new StateContext(conf, DatanodeStates.RUNNING, Mockito.mock(DatanodeStateMachine.class)); @@ -214,7 +204,7 @@ public void testheartbeatWithContainerActions() throws Exception { @Test public void testheartbeatWithAllReports() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); StateContext context = new StateContext(conf, DatanodeStates.RUNNING, Mockito.mock(DatanodeStateMachine.class)); @@ -256,7 +246,7 @@ public void testheartbeatWithAllReports() throws Exception { */ private HeartbeatEndpointTask getHeartbeatEndpointTask( StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); StateContext context = new StateContext(conf, DatanodeStates.RUNNING, Mockito.mock(DatanodeStateMachine.class)); return getHeartbeatEndpointTask(conf, context, proxy); @@ -274,7 +264,7 @@ private HeartbeatEndpointTask getHeartbeatEndpointTask( * @return HeartbeatEndpointTask */ private HeartbeatEndpointTask getHeartbeatEndpointTask( - Configuration conf, + ConfigurationSource conf, StateContext context, StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) { DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder() diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java index 0d9c876fae6..57a0e5544a3 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java @@ -17,31 +17,31 @@ package org.apache.hadoop.ozone.container.common.volume; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; -import org.apache.hadoop.hdds.fs.SpaceUsagePersistence; -import org.apache.hadoop.hdds.fs.SpaceUsageSource; -import org.apache.hadoop.fs.StorageType; 
-import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
-import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
 import java.io.File;
 import java.time.Duration;
 import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
+import org.apache.hadoop.hdds.fs.SpaceUsageSource;
+import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+
 import static org.apache.hadoop.hdds.fs.MockSpaceUsagePersistence.inMemory;
 import static org.apache.hadoop.hdds.fs.MockSpaceUsageSource.fixed;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 /**
  * Unit tests for {@link HddsVolume}.
@@ -50,7 +50,7 @@ public class TestHddsVolume {
   private static final String DATANODE_UUID = UUID.randomUUID().toString();
   private static final String CLUSTER_ID = UUID.randomUUID().toString();
-  private static final Configuration CONF = new Configuration();
+  private static final OzoneConfiguration CONF = new OzoneConfiguration();
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
index 19ee54d8dfb..b2a39a9eea7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -18,25 +18,25 @@
 package org.apache.hadoop.ozone.container.common.volume;
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
 import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
 import org.apache.hadoop.hdds.fs.SpaceUsageSource;
-import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
-import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import java.io.IOException;
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
-
 /**
  * Tests {@link RoundRobinVolumeChoosingPolicy}.
  */
@@ -45,7 +45,7 @@ public class TestRoundRobinVolumeChoosingPolicy {
   private RoundRobinVolumeChoosingPolicy policy;
   private final List volumes = new ArrayList<>();
-  private static final Configuration CONF = new Configuration();
+  private static final OzoneConfiguration CONF = new OzoneConfiguration();
   private static final String BASE_DIR =
       getTestDir(TestRoundRobinVolumeChoosingPolicy.class.getSimpleName())
       .getAbsolutePath();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index c913d5376aa..3bd42d89fba 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -26,8 +26,8 @@
 import java.util.Set;
 import java.util.UUID;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -63,7 +63,7 @@ public class TestVolumeSetDiskChecks {
   @Rule
   public ExpectedException thrown = ExpectedException.none();
-  private Configuration conf = null;
+  private OzoneConfiguration conf = null;
   /**
    * Cleanup volume directories.
@@ -115,7 +115,7 @@ public void testBadDirectoryDetection() throws IOException {
     final MutableVolumeSet volumeSet = new MutableVolumeSet(
         UUID.randomUUID().toString(), conf) {
       @Override
-      HddsVolumeChecker getVolumeChecker(Configuration configuration)
+      HddsVolumeChecker getVolumeChecker(ConfigurationSource configuration)
           throws DiskErrorException {
         return new DummyChecker(configuration, new Timer(), numBadVolumes);
       }
@@ -139,7 +139,7 @@ public void testAllVolumesAreBad() throws IOException {
     final MutableVolumeSet volumeSet = new MutableVolumeSet(
         UUID.randomUUID().toString(), conf) {
       @Override
-      HddsVolumeChecker getVolumeChecker(Configuration configuration)
+      HddsVolumeChecker getVolumeChecker(ConfigurationSource configuration)
           throws DiskErrorException {
         return new DummyChecker(configuration, new Timer(), numVolumes);
       }
@@ -155,8 +155,8 @@ HddsVolumeChecker getVolumeChecker(Configuration configuration)
    * storage directories.
* @param numDirs */ - private Configuration getConfWithDataNodeDirs(int numDirs) { - final Configuration ozoneConf = new OzoneConfiguration(); + private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) { + final OzoneConfiguration ozoneConf = new OzoneConfiguration(); final List dirs = new ArrayList<>(); for (int i = 0; i < numDirs; ++i) { dirs.add(GenericTestUtils.getRandomizedTestDir().getPath()); @@ -173,7 +173,7 @@ private Configuration getConfWithDataNodeDirs(int numDirs) { static class DummyChecker extends HddsVolumeChecker { private final int numBadVolumes; - DummyChecker(Configuration conf, Timer timer, int numBadVolumes) + DummyChecker(ConfigurationSource conf, Timer timer, int numBadVolumes) throws DiskErrorException { super(conf, timer); this.numBadVolumes = numBadVolumes; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index ab4d5f6a3fc..256f8b75feb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -18,49 +18,46 @@ package org.apache.hadoop.ozone.container.keyvalue; -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.UUID; + import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.utils.MetadataKeyFilters; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; -import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; - -import java.io.File; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.List; -import java.util.NoSuchElementException; -import java.util.UUID; +import com.google.common.primitives.Longs; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_METADATA_STORE_IMPL_LEVELDB; -import static 
org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_METADATA_STORE_IMPL_ROCKSDB; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB; import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK; import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK; +import org.junit.After; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; /** * This class is used to test KeyValue container block iterator. @@ -71,7 +68,7 @@ public class TestKeyValueBlockIterator { private KeyValueContainer container; private KeyValueContainerData containerData; private MutableVolumeSet volumeSet; - private Configuration conf; + private OzoneConfiguration conf; private File testRoot; private final String storeImpl; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 1f5c677a5d6..9ad6e5b126b 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -23,7 +23,6 @@ import java.util.HashMap; import java.util.UUID; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -255,7 +254,7 @@ public void testHandlerCommandHandling() throws Exception { @Test public void testVolumeSetInKeyValueHandler() throws Exception{ File path = GenericTestUtils.getRandomizedTestDir(); - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath()); MutableVolumeSet volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf); @@ -312,7 +311,7 @@ private ContainerCommandRequestProto getDummyCommandRequestProto( @Test public void testCloseInvalidContainer() throws IOException { long containerID = 1234L; - Configuration conf = new Configuration(); + OzoneConfiguration conf = new OzoneConfiguration(); KeyValueContainerData kvData = new KeyValueContainerData(containerID, layout, (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(), diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java index a136983415b..ecb7af8054d 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java @@ -16,18 +16,18 @@ */ package org.apache.hadoop.ozone.container.testutils; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; -import 
org.apache.hadoop.ozone.container.keyvalue.statemachine.background - .BlockDeletingService; -import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; - import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService; +import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.ThreadFactoryBuilder; + /** * A test class implementation for {@link BlockDeletingService}. */ @@ -43,7 +43,7 @@ public class BlockDeletingServiceTestImpl private AtomicInteger numOfProcessed = new AtomicInteger(0); public BlockDeletingServiceTestImpl(OzoneContainer container, - int serviceInterval, Configuration conf) { + int serviceInterval, ConfigurationSource conf) { super(container, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS, TimeUnit.MILLISECONDS, conf); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java index 2c5c81782a8..368197a2764 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java @@ -17,30 +17,27 @@ */ package org.apache.hadoop.hdds.conf; -import com.google.gson.Gson; -import java.io.IOException; -import java.io.Writer; - -import java.util.HashMap; -import java.util.Map; -import java.util.Properties; import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.ws.rs.core.HttpHeaders; +import java.io.IOException; +import java.io.Writer; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpServer2; import com.google.common.annotations.VisibleForTesting; +import com.google.gson.Gson; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY; - /** * A servlet to print out the running configuration data. */ @@ -63,8 +60,9 @@ public class HddsConfServlet extends HttpServlet { * Return the Configuration of the daemon hosting this servlet. * This is populated when the HttpServer starts. */ - private Configuration getConfFromContext() { - Configuration conf = (Configuration) getServletContext().getAttribute( + private OzoneConfiguration getConfFromContext() { + OzoneConfiguration conf = + (OzoneConfiguration) getServletContext().getAttribute( HttpServer2.CONF_CONTEXT_ATTRIBUTE); assert conf != null; return conf; @@ -127,11 +125,11 @@ static String parseAcceptHeader(HttpServletRequest request) { /** * Guts of the servlet - extracted for easy testing. 
*/ - static void writeResponse(Configuration conf, + static void writeResponse(OzoneConfiguration conf, Writer out, String format, String propertyName) throws IOException, IllegalArgumentException, BadFormatException { if (FORMAT_JSON.equals(format)) { - Configuration.dumpConfiguration(conf, propertyName, out); + OzoneConfiguration.dumpConfiguration(conf, propertyName, out); } else if (FORMAT_XML.equals(format)) { conf.writeXml(propertyName, out); } else { @@ -155,7 +153,7 @@ private void processConfigTagRequest(HttpServletRequest request, Writer out) throws IOException { String cmd = request.getParameter(COMMAND); Gson gson = new Gson(); - Configuration config = getOzoneConfig(); + OzoneConfiguration config = getOzoneConfig(); switch (cmd) { case "getOzoneTags": @@ -184,7 +182,7 @@ private void processConfigTagRequest(HttpServletRequest request, } - private static Configuration getOzoneConfig() { + private static OzoneConfiguration getOzoneConfig() { return OZONE_CONFIG; } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java index 21a19b536ec..f740e437435 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java @@ -18,12 +18,21 @@ */ package org.apache.hadoop.hdds.security.x509.certificates.utils; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; +import java.io.StringReader; +import java.io.StringWriter; +import java.security.KeyPair; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; + +import com.google.common.base.Preconditions; import org.apache.logging.log4j.util.Strings; import org.bouncycastle.asn1.ASN1EncodableVector; import org.bouncycastle.asn1.ASN1Object; @@ -50,14 +59,6 @@ import org.bouncycastle.util.io.pem.PemObject; import org.bouncycastle.util.io.pem.PemReader; -import java.io.IOException; -import java.io.StringReader; -import java.io.StringWriter; -import java.security.KeyPair; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; - /** * A certificate sign request object that wraps operations to build a * PKCS10CertificationRequest to CertificateServer. 
@@ -154,7 +155,7 @@ public static class Builder { private boolean digitalEncryption; public CertificateSignRequest.Builder setConfiguration( - Configuration configuration) { + ConfigurationSource configuration) { this.config = new SecurityConfig(configuration); return this; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java index 1fd6d7c9af6..7ecc16109ed 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java @@ -19,13 +19,23 @@ package org.apache.hadoop.hdds.security.x509.certificates.utils; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; +import java.math.BigInteger; +import java.security.KeyPair; +import java.time.Duration; +import java.time.LocalDate; +import java.time.LocalTime; +import java.time.ZoneOffset; +import java.util.Date; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; import org.apache.hadoop.util.Time; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; import org.apache.logging.log4j.util.Strings; import org.bouncycastle.asn1.DEROctetString; import org.bouncycastle.asn1.x500.X500Name; @@ -40,15 +50,6 @@ import org.bouncycastle.operator.OperatorCreationException; import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder; -import java.io.IOException; -import java.math.BigInteger; -import java.security.KeyPair; -import java.time.Duration; -import java.time.LocalDate; -import java.time.LocalTime; -import java.time.ZoneOffset; -import java.util.Date; - /** * A Self Signed Certificate with CertificateServer basic constraint can be used * to bootstrap a certificate infrastructure, if no external certificate is @@ -158,7 +159,7 @@ public static class Builder { private SecurityConfig config; private boolean isCA; - public Builder setConfiguration(Configuration configuration) { + public Builder setConfiguration(ConfigurationSource configuration) { this.config = new SecurityConfig(configuration); return this; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java index 640f5ca0b94..1f3b6659888 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java @@ -18,16 +18,17 @@ */ package org.apache.hadoop.hdds.security.x509.keys; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.security.KeyPair; import java.security.KeyPairGenerator; import java.security.NoSuchAlgorithmException; import java.security.NoSuchProviderException; +import 
org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * A class to generate Key Pair for use with Certificates. */ @@ -41,7 +42,7 @@ public class HDDSKeyGenerator { * * @param configuration - config */ - public HDDSKeyGenerator(Configuration configuration) { + public HDDSKeyGenerator(ConfigurationSource configuration) { this.securityConfig = new SecurityConfig(configuration); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index 9c87018301f..08f04f1cde7 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -21,8 +21,8 @@ import java.net.InetSocketAddress; import java.util.Collection; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ipc.RPC; @@ -138,7 +138,7 @@ public static void releaseConnection(HttpRequestBase request) { * @param conf * @return */ - public static File getScmDbDir(Configuration conf) { + public static File getScmDbDir(ConfigurationSource conf) { File metadataDir = getDirectoryFromConfig(conf, ScmConfigKeys.OZONE_SCM_DB_DIRS, "SCM"); if (metadataDir != null) { @@ -159,7 +159,7 @@ public static File getScmDbDir(Configuration conf) { * @param componentName Which component's key is this * @return File created from the value of the key in conf. */ - public static File getDirectoryFromConfig(Configuration conf, + public static File getDirectoryFromConfig(ConfigurationSource conf, String key, String componentName) { final Collection metadirs = conf.getTrimmedStringCollection(key); @@ -191,7 +191,7 @@ public static File getDirectoryFromConfig(Configuration conf, * @return File MetaDir * @throws IllegalArgumentException if the configuration setting is not set */ - public static File getOzoneMetaDirPath(Configuration conf) { + public static File getOzoneMetaDirPath(ConfigurationSource conf) { File dirPath = getDirectoryFromConfig(conf, HddsConfigKeys.OZONE_METADATA_DIRS, "Ozone"); if (dirPath == null) { @@ -215,7 +215,7 @@ public static void setOzoneMetaDirPath(OzoneConfiguration conf, * @param key The configuration key which specify the directory. * @return The path of the directory. */ - public static File getDBPath(Configuration conf, String key) { + public static File getDBPath(ConfigurationSource conf, String key) { final File dbDirPath = getDirectoryFromConfig(conf, key, "OM"); if (dbDirPath != null) { @@ -233,7 +233,7 @@ public static String getRemoteUserName() { return remoteUser != null ? remoteUser.getUserName() : null; } - public static String getDefaultRatisDirectory(Configuration conf) { + public static String getDefaultRatisDirectory(ConfigurationSource conf) { LOG.warn("Storage directory for Ratis is not configured. It is a good " + "idea to map this to an SSD disk. 
Falling back to {}", HddsConfigKeys.OZONE_METADATA_DIRS); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java index 5e6d0e6ab61..2f6df583603 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java @@ -27,7 +27,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.HddsConfServlet; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -62,7 +64,7 @@ public abstract class BaseHttpServer { "org.eclipse.jetty.webapp.basetempdir"; private HttpServer2 httpServer; - private final Configuration conf; + private final ConfigurationSource conf; private InetSocketAddress httpAddress; private InetSocketAddress httpsAddress; @@ -76,7 +78,8 @@ public abstract class BaseHttpServer { private boolean profilerSupport; - public BaseHttpServer(Configuration conf, String name) throws IOException { + public BaseHttpServer(ConfigurationSource conf, String name) + throws IOException { this.name = name; this.conf = conf; policy = getHttpPolicy(conf); @@ -87,7 +90,7 @@ public BaseHttpServer(Configuration conf, String name) throws IOException { // Avoid registering o.a.h.http.PrometheusServlet in HttpServer2. // TODO: Replace "hadoop.prometheus.endpoint.enabled" with // CommonConfigurationKeysPublic.HADOOP_PROMETHEUS_ENABLED when possible. - conf.setBoolean("hadoop.prometheus.endpoint.enabled", false); + conf.set("hadoop.prometheus.endpoint.enabled", "false"); HttpServer2.Builder builder = newHttpServer2BuilderForOzone( conf, httpAddress, httpsAddress, @@ -140,7 +143,7 @@ public BaseHttpServer(Configuration conf, String name) throws IOException { * Recon to initialize their HTTP / HTTPS server. 
*/ public static HttpServer2.Builder newHttpServer2BuilderForOzone( - Configuration conf, final InetSocketAddress httpAddr, + ConfigurationSource conf, final InetSocketAddress httpAddr, final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey, String spnegoKeytabFileKey) throws IOException { HttpConfig.Policy policy = getHttpPolicy(conf); @@ -172,7 +175,7 @@ public static HttpServer2.Builder newHttpServer2BuilderForOzone( } if (policy.isHttpsEnabled() && httpsAddr != null) { - Configuration sslConf = loadSslConfiguration(conf); + ConfigurationSource sslConf = loadSslConfiguration(conf); loadSslConfToHttpServerBuilder(builder, sslConf); if (httpsAddr.getPort() == 0) { @@ -295,7 +298,7 @@ public void updateConnectorAddress() { public static HttpServer2.Builder loadSslConfToHttpServerBuilder( - HttpServer2.Builder builder, Configuration sslConf) { + HttpServer2.Builder builder, ConfigurationSource sslConf) { return builder .needsClientAuth( sslConf.getBoolean(OZONE_CLIENT_HTTPS_NEED_AUTH_KEY, @@ -320,7 +323,7 @@ public static HttpServer2.Builder loadSslConfToHttpServerBuilder( * @return DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY if the key is not empty * else return defaultKey */ - public static String getSpnegoKeytabKey(Configuration conf, + public static String getSpnegoKeytabKey(ConfigurationSource conf, String defaultKey) { String value = conf.get( @@ -339,7 +342,7 @@ public static String getSpnegoKeytabKey(Configuration conf, * @param alias name of the credential to retreive * @return String credential value or null */ - static String getPassword(Configuration conf, String alias) { + static String getPassword(ConfigurationSource conf, String alias) { String password = null; try { char[] passchars = conf.getPassword(alias); @@ -357,8 +360,10 @@ static String getPassword(Configuration conf, String alias) { /** * Load HTTPS-related configuration. */ - public static Configuration loadSslConfiguration(Configuration conf) { - Configuration sslConf = new Configuration(false); + public static ConfigurationSource loadSslConfiguration( + ConfigurationSource conf) { + Configuration sslConf = + new Configuration(false); sslConf.addResource(conf.get( OzoneConfigKeys.OZONE_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY, @@ -383,7 +388,7 @@ public static Configuration loadSslConfiguration(Configuration conf) { boolean requireClientAuth = conf.getBoolean( OZONE_CLIENT_HTTPS_NEED_AUTH_KEY, OZONE_CLIENT_HTTPS_NEED_AUTH_DEFAULT); sslConf.setBoolean(OZONE_CLIENT_HTTPS_NEED_AUTH_KEY, requireClientAuth); - return sslConf; + return new LegacyHadoopConfigurationSource(sslConf); } public InetSocketAddress getHttpAddress() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/FilterInitializer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/FilterInitializer.java index 388fc21947b..4d6b65b502d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/FilterInitializer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/FilterInitializer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.server.http; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; /** * Initialize a javax.servlet.Filter. 
@@ -29,5 +29,5 @@ public abstract class FilterInitializer { * @param conf Configuration for run-time parameters */ public abstract void initFilter(FilterContainer container, - Configuration conf); + ConfigurationSource conf); } \ No newline at end of file diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java index 663a5551229..f340bdf639a 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java @@ -20,7 +20,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.ozone.OzoneConfigKeys; /** @@ -61,7 +61,7 @@ public boolean isHttpsEnabled() { } } - public static Policy getHttpPolicy(Configuration conf) { + public static Policy getHttpPolicy(ConfigurationSource conf) { String policyStr = conf.get(OzoneConfigKeys.OZONE_HTTP_POLICY_KEY, OzoneConfigKeys.OZONE_HTTP_POLICY_DEFAULT); HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java index 097580b0204..a57fe5496c7 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java @@ -17,6 +17,17 @@ */ package org.apache.hadoop.hdds.server.http; +import javax.servlet.Filter; +import javax.servlet.FilterChain; +import javax.servlet.FilterConfig; +import javax.servlet.ServletContext; +import javax.servlet.ServletException; +import javax.servlet.ServletRequest; +import javax.servlet.ServletResponse; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletRequestWrapper; +import javax.servlet.http.HttpServletResponse; import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; @@ -37,29 +48,15 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import javax.servlet.Filter; -import javax.servlet.FilterChain; -import javax.servlet.FilterConfig; -import javax.servlet.ServletContext; -import javax.servlet.ServletException; -import javax.servlet.ServletRequest; -import javax.servlet.ServletResponse; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletRequestWrapper; -import javax.servlet.http.HttpServletResponse; - -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Lists; -import com.sun.jersey.spi.container.servlet.ServletContainer; import org.apache.hadoop.HadoopIllegalArgumentException; -import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.conf.ConfServlet; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration.IntegerRanges; import org.apache.hadoop.fs.CommonConfigurationKeys; +import 
org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.jmx.JMXJsonServlet; import org.apache.hadoop.log.LogLevel; import org.apache.hadoop.security.AuthenticationFilterInitializer; @@ -72,6 +69,11 @@ import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; + +import com.google.common.base.Preconditions; +import com.google.common.collect.ImmutableMap; +import com.google.common.collect.Lists; +import com.sun.jersey.spi.container.servlet.ServletContainer; import org.eclipse.jetty.http.HttpVersion; import org.eclipse.jetty.server.ConnectionFactory; import org.eclipse.jetty.server.Connector; @@ -192,8 +194,8 @@ public final class HttpServer2 implements FilterContainer { public static class Builder { private ArrayList endpoints = Lists.newArrayList(); private String name; - private Configuration conf; - private Configuration sslConf; + private ConfigurationSource conf; + private ConfigurationSource sslConf; private String[] pathSpecs; private AccessControlList adminsAcl; private boolean securityEnabled = false; @@ -291,7 +293,7 @@ public Builder setPortRanges(IntegerRanges ranges) { return this; } - public Builder setConf(Configuration configuration) { + public Builder setConf(ConfigurationSource configuration) { this.conf = configuration; return this; } @@ -300,7 +302,7 @@ public Builder setConf(Configuration configuration) { * Specify the SSL configuration to load. This API provides an alternative * to keyStore/keyPassword/trustStore. */ - public Builder setSSLConf(Configuration sslCnf) { + public Builder setSSLConf(ConfigurationSource sslCnf) { this.sslConf = sslCnf; return this; } @@ -368,14 +370,15 @@ public Builder setXFrameOption(String option) { } /** - * A wrapper of {@link Configuration#getPassword(String)}. It returns + * A wrapper of {@link ConfigurationSource#getPassword(String)}. It returns * String instead of char[]. 
* * @param conf the configuration * @param name the property name * @return the password string or null */ - private static String getPasswordString(Configuration conf, String name) + private static String getPasswordString(ConfigurationSource conf, + String name) throws IOException { char[] passchars = conf.getPassword(name); if (passchars == null) { @@ -426,7 +429,7 @@ public HttpServer2 build() throws IOException { } if (this.conf == null) { - conf = new Configuration(); + conf = new OzoneConfiguration(); } HttpServer2 server = new HttpServer2(this); @@ -565,7 +568,7 @@ private HttpServer2(final Builder b) throws IOException { } private void initializeWebServer(String name, String hostName, - Configuration conf, String[] pathSpecs) + ConfigurationSource conf, String[] pathSpecs) throws IOException { Preconditions.checkNotNull(webAppContext); @@ -602,7 +605,6 @@ private void initializeWebServer(String name, String hostName, addGlobalFilter("safety", QuotingInputFilter.class.getName(), xFrameParams); final FilterInitializer[] initializers = getFilterInitializers(conf); if (initializers != null) { - conf = new Configuration(conf); conf.set(BIND_ADDRESS, hostName); for (FilterInitializer c : initializers) { c.initFilter(this, conf); @@ -654,18 +656,20 @@ private static WebAppContext createWebAppContext(Builder b, private static SignerSecretProvider constructSecretProvider(final Builder b, ServletContext ctx) throws Exception { - final Configuration conf = b.conf; + final ConfigurationSource conf = b.conf; Properties config = getFilterProperties(conf, b.authFilterConfigurationPrefix); return AuthenticationFilter.constructSecretProvider( ctx, config, b.disallowFallbackToRandomSignerSecretProvider); } - private static Properties getFilterProperties(Configuration conf, String + private static Properties getFilterProperties(ConfigurationSource conf, String prefix) { Properties prop = new Properties(); Map filterConfig = AuthenticationFilterInitializer - .getFilterConfigMap(conf, prefix); + .getFilterConfigMap( + LegacyHadoopConfigurationSource.asHadoopConfiguration(conf), + prefix); prop.putAll(filterConfig); return prop; } @@ -678,7 +682,8 @@ private static void addNoCacheFilter(ServletContextHandler ctxt) { /** * Get an array of FilterConfiguration specified in the conf. */ - private static FilterInitializer[] getFilterInitializers(Configuration conf) { + private static FilterInitializer[] getFilterInitializers( + ConfigurationSource conf) { if (conf == null) { return null; } @@ -691,8 +696,12 @@ private static FilterInitializer[] getFilterInitializers(Configuration conf) { FilterInitializer[] initializers = new FilterInitializer[classes.length]; for (int i = 0; i < classes.length; i++) { - initializers[i] = (FilterInitializer) ReflectionUtils.newInstance( - classes[i], conf); + try { + initializers[i] = (FilterInitializer) classes[i].newInstance(); + } catch (Exception e) { + LOG.error("Can't initialize the filter initializer {}", + classes[i].getCanonicalName(), e); + } } return initializers; } @@ -703,7 +712,7 @@ private static FilterInitializer[] getFilterInitializers(Configuration conf) { * @throws IOException */ protected void addDefaultApps(ContextHandlerCollection parent, - final String appDir, Configuration conf) throws IOException { + final String appDir, ConfigurationSource conf) throws IOException { // set up the context for "/logs/" if "hadoop.log.dir" property is defined // and it's enabled. 
String logDir = System.getProperty("hadoop.log.dir"); @@ -750,7 +759,7 @@ protected void addDefaultApps(ContextHandlerCollection parent, } private void setContextAttributes(ServletContextHandler context, - Configuration conf) { + ConfigurationSource conf) { context.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf); context.getServletContext().setAttribute(ADMINS_ACL, adminsAcl); } @@ -1110,7 +1119,7 @@ public void setThreads(int min, int max) { pool.setMaxThreads(max); } - private void initSpnego(Configuration conf, String hostName, + private void initSpnego(ConfigurationSource conf, String hostName, String usernameConfKey, String keytabConfKey) throws IOException { Map params = new HashMap<>(); String principalInConf = conf.get(usernameConfKey); @@ -1380,8 +1389,9 @@ public String toString() { public static boolean isInstrumentationAccessAllowed( ServletContext servletContext, HttpServletRequest request, HttpServletResponse response) throws IOException { - Configuration conf = - (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + ConfigurationSource conf = + (ConfigurationSource) servletContext + .getAttribute(CONF_CONTEXT_ATTRIBUTE); boolean access = true; boolean adminAccess = conf.getBoolean( @@ -1405,8 +1415,9 @@ public static boolean isInstrumentationAccessAllowed( public static boolean hasAdministratorAccess( ServletContext servletContext, HttpServletRequest request, HttpServletResponse response) throws IOException { - Configuration conf = - (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); + ConfigurationSource conf = + (ConfigurationSource) servletContext + .getAttribute(CONF_CONTEXT_ATTRIBUTE); // If there is no authorization, anybody has administrator access. if (!conf.getBoolean( CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { @@ -1679,17 +1690,14 @@ private static XFrameOption getEnum(String value) { } } - private Map setHeaders(Configuration conf) { + private Map setHeaders(ConfigurationSource conf) { Map xFrameParams = new HashMap<>(); - Map headerConfigMap = - conf.getValByRegex(HTTP_HEADER_REGEX); xFrameParams.putAll(getDefaultHeaders()); if (this.xFrameOptionIsEnabled) { xFrameParams.put(HTTP_HEADER_PREFIX + X_FRAME_OPTIONS, this.xFrameOption.toString()); } - xFrameParams.putAll(headerConfigMap); return xFrameParams; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/StaticUserWebFilter.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/StaticUserWebFilter.java index c2d88cfee42..1428ba38902 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/StaticUserWebFilter.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/StaticUserWebFilter.java @@ -29,7 +29,7 @@ import java.security.Principal; import java.util.HashMap; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER; import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER; @@ -125,7 +125,7 @@ public void init(FilterConfig conf) throws ServletException { } @Override - public void initFilter(FilterContainer container, Configuration conf) { + public void initFilter(FilterContainer container, ConfigurationSource conf) { HashMap options = new HashMap(); String username = getUsernameFromConf(conf); @@ -139,7 +139,7 @@ public void initFilter(FilterContainer container, 
Configuration conf) { /** * Retrieve the static username from the configuration. */ - static String getUsernameFromConf(Configuration conf) { + static String getUsernameFromConf(ConfigurationSource conf) { String oldStyleUgi = conf.get(DEPRECATED_UGI_KEY); if (oldStyleUgi != null) { // We can't use the normal configuration deprecation mechanism here diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java index 17fa39286a0..214adb29c6c 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java @@ -24,8 +24,8 @@ import java.util.OptionalInt; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; @@ -87,7 +87,7 @@ private HddsServerUtil() { * @return Target {@code InetSocketAddress} for the SCM service endpoint. */ public static InetSocketAddress getScmAddressForDataNodes( - Configuration conf) { + ConfigurationSource conf) { // We try the following settings in decreasing priority to retrieve the // target host. // - OZONE_SCM_DATANODE_ADDRESS_KEY @@ -118,7 +118,7 @@ public static InetSocketAddress getScmAddressForDataNodes( * @return Target {@code InetSocketAddress} for the SCM client endpoint. */ public static InetSocketAddress getScmClientBindAddress( - Configuration conf) { + ConfigurationSource conf) { final String host = getHostNameFromConfigKeys(conf, ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY) .orElse(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT); @@ -138,7 +138,7 @@ public static InetSocketAddress getScmClientBindAddress( * @return Target {@code InetSocketAddress} for the SCM block client endpoint. */ public static InetSocketAddress getScmBlockClientBindAddress( - Configuration conf) { + ConfigurationSource conf) { final String host = getHostNameFromConfigKeys(conf, ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY) .orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT); @@ -158,7 +158,7 @@ public static InetSocketAddress getScmBlockClientBindAddress( * @return Target {@code InetSocketAddress} for the SCM security service. */ public static InetSocketAddress getScmSecurityInetAddress( - Configuration conf) { + ConfigurationSource conf) { final String host = getHostNameFromConfigKeys(conf, ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY) .orElse(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT); @@ -182,7 +182,7 @@ public static InetSocketAddress getScmSecurityInetAddress( * @return Target {@code InetSocketAddress} for the SCM service endpoint. */ public static InetSocketAddress getScmDataNodeBindAddress( - Configuration conf) { + ConfigurationSource conf) { final Optional host = getHostNameFromConfigKeys(conf, ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY); @@ -203,7 +203,7 @@ public static InetSocketAddress getScmDataNodeBindAddress( * @return Target {@code InetSocketAddress} for the SCM service endpoint. 
*/ public static InetSocketAddress getReconDataNodeBindAddress( - Configuration conf) { + ConfigurationSource conf) { final Optional host = getHostNameFromConfigKeys(conf, ReconConfigKeys.OZONE_RECON_DATANODE_BIND_HOST_KEY); @@ -222,7 +222,7 @@ public static InetSocketAddress getReconDataNodeBindAddress( * @param conf - Configuration * @return long in Milliseconds. */ - public static long getScmheartbeatCheckerInterval(Configuration conf) { + public static long getScmheartbeatCheckerInterval(ConfigurationSource conf) { return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); @@ -235,7 +235,7 @@ public static long getScmheartbeatCheckerInterval(Configuration conf) { * @param conf - Ozone Config * @return - HB interval in milli seconds. */ - public static long getScmHeartbeatInterval(Configuration conf) { + public static long getScmHeartbeatInterval(ConfigurationSource conf) { return conf.getTimeDuration(HDDS_HEARTBEAT_INTERVAL, HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS); } @@ -247,7 +247,7 @@ public static long getScmHeartbeatInterval(Configuration conf) { * @param conf - Configuration. * @return - Long, Milliseconds to wait before flagging a node as stale. */ - public static long getStaleNodeInterval(Configuration conf) { + public static long getStaleNodeInterval(ConfigurationSource conf) { long staleNodeIntervalMs = conf.getTimeDuration(OZONE_SCM_STALENODE_INTERVAL, @@ -284,7 +284,7 @@ public static long getStaleNodeInterval(Configuration conf) { * @param conf - Configuration. * @return - the interval for dead node flagging. */ - public static long getDeadNodeInterval(Configuration conf) { + public static long getDeadNodeInterval(ConfigurationSource conf) { long staleNodeIntervalMs = getStaleNodeInterval(conf); long deadNodeIntervalMs = conf.getTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, OZONE_SCM_DEADNODE_INTERVAL_DEFAULT, @@ -303,7 +303,7 @@ public static long getDeadNodeInterval(Configuration conf) { * @param conf - Ozone Config * @return - Rpc timeout in Milliseconds. */ - public static long getScmRpcTimeOutInMilliseconds(Configuration conf) { + public static long getScmRpcTimeOutInMilliseconds(ConfigurationSource conf) { return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT, OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); } @@ -314,7 +314,7 @@ public static long getScmRpcTimeOutInMilliseconds(Configuration conf) { * @param conf - Ozone Config * @return - Log warn interval. */ - public static int getLogWarnInterval(Configuration conf) { + public static int getLogWarnInterval(ConfigurationSource conf) { return conf.getInt(OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT, OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT); } @@ -324,14 +324,15 @@ public static int getLogWarnInterval(Configuration conf) { * @param conf - Conf * @return port number. 
*/ - public static int getContainerPort(Configuration conf) { + public static int getContainerPort(ConfigurationSource conf) { return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT); } - public static String getOzoneDatanodeRatisDirectory(Configuration conf) { + public static String getOzoneDatanodeRatisDirectory( + ConfigurationSource conf) { String storageDir = conf.get( - OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); + OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); if (Strings.isNullOrEmpty(storageDir)) { storageDir = ServerUtils.getDefaultRatisDirectory(conf); @@ -339,15 +340,13 @@ public static String getOzoneDatanodeRatisDirectory(Configuration conf) { return storageDir; } - - /** * Get the path for datanode id file. * * @param conf - Configuration * @return the path of datanode id as string */ - public static String getDatanodeIdFilePath(Configuration conf) { + public static String getDatanodeIdFilePath(ConfigurationSource conf) { String dataNodeIDDirPath = conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR); if (dataNodeIDDirPath == null) { @@ -404,7 +403,7 @@ public static SCMSecurityProtocolClientSideTranslatorPB getScmSecurityClient( * @throws IllegalArgumentException if configuration is not defined or invalid */ public static InetSocketAddress getScmAddressForSecurityProtocol( - Configuration conf) { + ConfigurationSource conf) { Optional host = getHostNameFromConfigKeys(conf, ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY, ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java index dafa92b6197..d697fdfaccd 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java @@ -24,7 +24,7 @@ import java.util.Optional; import java.util.concurrent.ConcurrentHashMap; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -34,7 +34,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF; - import org.iq80.leveldb.Options; import org.rocksdb.BlockBasedTableConfig; import org.rocksdb.Statistics; @@ -52,11 +51,11 @@ public class MetadataStoreBuilder { private File dbFile; private long cacheSize; private boolean createIfMissing = true; - private Optional optionalConf = Optional.empty(); + private Optional optionalConf = Optional.empty(); private String dbType; @VisibleForTesting - public static final Map CACHED_OPTS = - new ConcurrentHashMap<>(); + public static final Map + CACHED_OPTS = new ConcurrentHashMap<>(); @VisibleForTesting public static final OzoneConfiguration DEFAULT_CONF = new OzoneConfiguration(); @@ -80,7 +79,7 @@ public MetadataStoreBuilder setCreateIfMissing(boolean doCreate) { return this; } - public MetadataStoreBuilder setConf(Configuration configuration) { + public MetadataStoreBuilder setConf(ConfigurationSource configuration) { this.optionalConf = Optional.of(configuration); 
return this; } @@ -102,7 +101,7 @@ public MetadataStore build() throws IOException { } // Build db store based on configuration - final Configuration conf = optionalConf.orElse(DEFAULT_CONF); + final ConfigurationSource conf = optionalConf.orElse(DEFAULT_CONF); if (dbType == null) { LOG.debug("dbType is null, using "); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index c9906d1c42b..88d509acf8f 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -29,7 +29,7 @@ import java.util.List; import java.util.Set; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -73,7 +73,7 @@ public final class DBStoreBuilder { private String dbname; private Path dbPath; private List tableNames; - private Configuration configuration; + private ConfigurationSource configuration; private CodecRegistry registry; private String rocksDbStat; private RocksDBConfiguration rocksDBConfiguration; diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java index 9735d2cab97..1ebde84d51c 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java @@ -17,21 +17,20 @@ */ package org.apache.hadoop.hdds.server; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.conf.Configuration; +import java.io.File; + import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.test.PathUtils; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import java.io.File; +import org.apache.commons.io.FileUtils; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; /** * Unit tests for {@link ServerUtils}. 
@@ -49,7 +48,7 @@ public void testGetScmDbDir() { final File testDir = PathUtils.getTestDir(TestServerUtils.class); final File dbDir = new File(testDir, "scmDbDir"); final File metaDir = new File(testDir, "metaDir"); - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); @@ -72,7 +71,7 @@ public void testGetScmDbDir() { public void testGetScmDbDirWithFallback() { final File testDir = PathUtils.getTestDir(TestServerUtils.class); final File metaDir = new File(testDir, "metaDir"); - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); try { assertFalse(metaDir.exists()); @@ -99,7 +98,7 @@ public void ozoneMetadataDirIsMandatory() { public void ozoneMetadataDirAcceptsSingleItem() { final File testDir = PathUtils.getTestDir(TestServerUtils.class); final File metaDir = new File(testDir, "metaDir"); - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); try { @@ -113,7 +112,7 @@ public void ozoneMetadataDirAcceptsSingleItem() { @Test public void ozoneMetadataDirRejectsList() { - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, "/data/meta1,/data/meta2"); thrown.expect(IllegalArgumentException.class); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java index d1832e285d6..1730df6a04b 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.server.http; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.junit.Assert; import org.junit.Test; @@ -28,7 +28,7 @@ public class TestBaseHttpServer { @Test public void getBindAddress() throws Exception { - Configuration conf = new Configuration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.set("enabled", "false"); BaseHttpServer baseHttpServer = new BaseHttpServer(conf, "test") { diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java index bc53c7a4cdd..3eb832f2108 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java @@ -28,7 +28,6 @@ import java.util.UUID; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; @@ -90,7 +89,7 @@ public void init() throws IOException { testDir = GenericTestUtils.getTestDir(getClass().getSimpleName() + "-" + storeImpl.toLowerCase()); - Configuration conf = new OzoneConfiguration(); + 
OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); store = MetadataStoreBuilder.newBuilder() @@ -110,7 +109,7 @@ public void init() throws IOException { @Test public void testIterator() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); File dbDir = GenericTestUtils.getRandomizedTestDir(); MetadataStore dbStore = MetadataStoreBuilder.newBuilder() @@ -166,7 +165,7 @@ public void testIterator() throws Exception { @Test public void testMetaStoreConfigDifferentFromType() throws IOException { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); String dbType; GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG); @@ -193,7 +192,7 @@ public void testMetaStoreConfigDifferentFromType() throws IOException { @Test public void testdbTypeNotSet() throws IOException { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG); GenericTestUtils.LogCapturer logCapturer = @@ -460,7 +459,7 @@ public void testInvalidStartKey() throws IOException { @Test public void testDestroyDB() throws IOException { // create a new DB to test db destroy - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() @@ -485,7 +484,7 @@ public void testDestroyDB() throws IOException { @Test public void testBatchWrite() throws IOException { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl); File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName() diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java index 29c780304cb..610e898a2d7 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java @@ -17,7 +17,13 @@ */ package org.apache.hadoop.hdds.utils; -import org.apache.hadoop.conf.Configuration; +import javax.management.MBeanServer; +import java.io.File; +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.HashMap; +import java.util.Map; + import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.metrics2.AbstractMetric; import org.apache.hadoop.metrics2.MetricsCollector; @@ -29,27 +35,20 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import javax.management.MBeanServer; -import java.io.File; -import java.io.IOException; -import java.lang.management.ManagementFactory; -import java.util.HashMap; -import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; +import org.junit.Assert; import 
static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import org.junit.Before; +import org.junit.Test; /** * Test the JMX interface for the rocksdb metastore implementation. */ public class TestRocksDBStoreMBean { - private Configuration conf; + private OzoneConfiguration conf; @Before public void init() throws Exception { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java index 25457f72bc8..7f954b7daeb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java @@ -17,21 +17,22 @@ package org.apache.hadoop.hdds.scm; -import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; +import java.util.stream.Collectors; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; + +import com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import java.util.stream.Collectors; - /** * This policy implements a set of invariants which are common * for all basic placement policies, acts as the repository of helper @@ -43,15 +44,16 @@ public abstract class SCMCommonPlacementPolicy implements PlacementPolicy { LoggerFactory.getLogger(SCMCommonPlacementPolicy.class); private final NodeManager nodeManager; private final Random rand; - private final Configuration conf; + private final ConfigurationSource conf; /** * Constructor. * * @param nodeManager NodeManager - * @param conf Configuration class. + * @param conf Configuration class. */ - public SCMCommonPlacementPolicy(NodeManager nodeManager, Configuration conf) { + public SCMCommonPlacementPolicy(NodeManager nodeManager, + ConfigurationSource conf) { this.nodeManager = nodeManager; this.rand = new Random(); this.conf = conf; @@ -80,7 +82,7 @@ public Random getRand() { * * @return Configuration */ - public Configuration getConf() { + public ConfigurationSource getConf() { return conf; } @@ -95,11 +97,10 @@ public Configuration getConf() { * 3. if a set of containers are requested, we either meet the required * number of nodes or we fail that request. * - * * @param excludedNodes - datanodes with existing replicas - * @param favoredNodes - list of nodes preferred. + * @param favoredNodes - list of nodes preferred. * @param nodesRequired - number of datanodes required. - * @param sizeRequired - size required for the container or block. + * @param sizeRequired - size required for the container or block. * @return list of datanodes chosen. * @throws SCMException SCM exception. */ @@ -162,7 +163,7 @@ public boolean hasEnoughSpace(DatanodeDetails datanodeDetails, * expected number of nodes. 
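As a caller-side illustration of the placement invariants spelled out above (this is not part of the patch; the policy instance and the node lists are assumed to exist):

    // Sketch: request 3 datanodes able to hold an additional 1 GB container,
    // excluding nodes that already hold a replica. chooseDatanodes may throw
    // SCMException when the request cannot be satisfied.
    List<DatanodeDetails> excludedNodes = nodesWithExistingReplicas; // assumed
    List<DatanodeDetails> favoredNodes = Collections.emptyList();
    List<DatanodeDetails> targets = placementPolicy.chooseDatanodes(
        excludedNodes, favoredNodes, 3, 1024L * 1024 * 1024);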
* * @param nodesRequired - Nodes Required - * @param healthyNodes - List of Nodes in the result set. + * @param healthyNodes - List of Nodes in the result set. * @return List of Datanodes that can be used for placement. * @throws SCMException SCMException */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index 8c09c535110..e81229d10f0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -16,6 +16,7 @@ */ package org.apache.hadoop.hdds.scm.block; +import javax.management.ObjectName; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; @@ -23,19 +24,16 @@ import java.util.Map; import java.util.Objects; import java.util.concurrent.TimeUnit; -import javax.management.ObjectName; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ContainerBlockID; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmUtils; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; -import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; @@ -44,24 +42,21 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; +import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; +import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.utils.UniqueId; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.hdds.utils.UniqueId; + +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.INVALID_BLOCK_SIZE; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .INVALID_BLOCK_SIZE; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; -import static org.apache.hadoop.ozone.OzoneConfigKeys - 
.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; - /** Block Manager manages the block access for SCM. */ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { @@ -89,7 +84,7 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { * @param scm * @throws IOException */ - public BlockManagerImpl(final Configuration conf, + public BlockManagerImpl(final ConfigurationSource conf, final StorageContainerManager scm) { Objects.requireNonNull(scm, "SCM cannot be null"); this.pipelineManager = scm.getPipelineManager(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index 1752e2a06b5..08639baaac1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -30,7 +30,7 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult; @@ -77,7 +77,7 @@ public class DeletedBlockLogImpl // Maps txId to set of DNs which are successful in committing the transaction private Map> transactionToDNsCommitMap; - public DeletedBlockLogImpl(Configuration conf, + public DeletedBlockLogImpl(ConfigurationSource conf, ContainerManager containerManager, SCMMetadataStore scmMetadataStore) { maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java index 74db22d6652..5ca75d2a116 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java @@ -18,7 +18,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.node.NodeManager; @@ -83,7 +83,7 @@ public class SCMBlockDeletingService extends BackgroundService { public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog, ContainerManager containerManager, NodeManager nodeManager, EventPublisher eventPublisher, long interval, long serviceTimeout, - Configuration conf) { + ConfigurationSource conf) { super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS, BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout); this.deletedBlockLog = deletedBlockLog; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index 42654326cf1..5a22521d3dd 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -17,9 +17,6 @@ package org.apache.hadoop.hdds.scm.container; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .FAILED_TO_CHANGE_CONTAINER_STATE; - import java.io.IOException; import java.util.HashSet; import java.util.List; @@ -29,8 +26,8 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; @@ -43,15 +40,15 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.ozone.common.statemachine - .InvalidStateTransitionException; +import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; import org.apache.hadoop.ozone.common.statemachine.StateMachine; import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import com.google.common.base.Preconditions; import com.google.common.util.concurrent.AtomicLongMap; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.FAILED_TO_CHANGE_CONTAINER_STATE; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A container state manager keeps track of container states and returns @@ -132,7 +129,7 @@ public class ContainerStateManager { * TODO : Add Container Tags so we know which containers are owned by SCM. */ @SuppressWarnings("unchecked") - public ContainerStateManager(final Configuration configuration) { + public ContainerStateManager(final ConfigurationSource configuration) { // Initialize the container state machine. 
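The constructors touched in these hunks keep reading their settings through the same typed getters; only the declared parameter type moves to ConfigurationSource. A small sketch of the DeletedBlockLogImpl-style read; the owning class of the key constant and the default literal are assumptions:

    // Sketch: typed read through ConfigurationSource, as in DeletedBlockLogImpl.
    ConfigurationSource conf = new OzoneConfiguration();
    int maxRetry = conf.getInt(
        ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, // assumed to live in ScmConfigKeys
        4096);                                            // illustrative default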
final Set finalStates = new HashSet(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java index 3838b9df21b..38c3d1189ee 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java @@ -16,29 +16,6 @@ */ package org.apache.hadoop.hdds.scm.container; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.container.metrics.SCMContainerManagerMetrics; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; -import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.hadoop.hdds.server.ServerUtils; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdds.utils.BatchOperation; -import org.apache.hadoop.hdds.utils.MetadataStore; -import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import java.io.File; import java.io.IOException; import java.util.ArrayList; @@ -53,9 +30,32 @@ import java.util.concurrent.locks.ReentrantLock; import java.util.stream.Collectors; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.metrics.SCMContainerManagerMetrics; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; +import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.hadoop.hdds.utils.BatchOperation; +import org.apache.hadoop.hdds.utils.MetadataStore; +import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; +import org.apache.hadoop.ozone.OzoneConsts; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.primitives.Longs; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * ContainerManager class contains the mapping from a 
name to a pipeline @@ -81,11 +81,11 @@ public class SCMContainerManager implements ContainerManager { * passed to LevelDB and this memory is allocated in Native code space. * CacheSize is specified * in MB. - * @param conf - {@link Configuration} + * @param conf - {@link ConfigurationSource} * @param pipelineManager - {@link PipelineManager} * @throws IOException on Failure. */ - public SCMContainerManager(final Configuration conf, + public SCMContainerManager(final ConfigurationSource conf, PipelineManager pipelineManager) throws IOException { @@ -612,7 +612,7 @@ public void notifyContainerReportProcessing(boolean isFullReport, } } - protected File getContainerDBPath(Configuration conf) { + protected File getContainerDBPath(ConfigurationSource conf) { File metaDir = ServerUtils.getScmDbDir(conf); return new File(metaDir, SCM_CONTAINER_DB); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java index a3024dff0bd..68337657014 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java @@ -16,17 +16,18 @@ * limitations under the License. */ package org.apache.hadoop.hdds.scm.container.placement.algorithms; -import org.apache.hadoop.conf.Configuration; +import java.lang.reflect.Constructor; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.node.NodeManager; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.lang.reflect.Constructor; - /** * A factory to create container placement instance based on configuration * property ozone.scm.container.placement.classname. 
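A hypothetical usage sketch for the factory described above; nodeManager, clusterMap and metrics are assumed to exist, and only the property name and the constructor shape come from this patch:

    // Sketch: pick a placement policy by class name; the factory reflectively
    // invokes the (NodeManager, ConfigurationSource, NetworkTopology, boolean,
    // SCMContainerPlacementMetrics) constructor. getPolicy may throw SCMException.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("ozone.scm.container.placement.classname",
        SCMContainerPlacementRackAware.class.getCanonicalName());
    PlacementPolicy policy = ContainerPlacementPolicyFactory.getPolicy(
        conf, nodeManager, clusterMap, true, metrics);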
@@ -44,7 +45,7 @@ private ContainerPlacementPolicyFactory() { public static PlacementPolicy getPolicy( - Configuration conf, final NodeManager nodeManager, + ConfigurationSource conf, final NodeManager nodeManager, NetworkTopology clusterMap, final boolean fallback, SCMContainerPlacementMetrics metrics) throws SCMException{ final Class placementClass = conf @@ -54,7 +55,7 @@ public static PlacementPolicy getPolicy( Constructor constructor; try { constructor = placementClass.getDeclaredConstructor(NodeManager.class, - Configuration.class, NetworkTopology.class, boolean.class, + ConfigurationSource.class, NetworkTopology.class, boolean.class, SCMContainerPlacementMetrics.class); LOG.info("Create container placement policy of type {}", placementClass.getCanonicalName()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java index 19093448b92..7d2db05c055 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java @@ -19,7 +19,7 @@ import java.util.List; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; @@ -80,7 +80,7 @@ public final class SCMContainerPlacementCapacity * @param conf Configuration */ public SCMContainerPlacementCapacity(final NodeManager nodeManager, - final Configuration conf, final NetworkTopology networkTopology, + final ConfigurationSource conf, final NetworkTopology networkTopology, final boolean fallback, final SCMContainerPlacementMetrics metrics) { super(nodeManager, conf); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java index 8933fe953a7..72193ff1f07 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java @@ -19,7 +19,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; import org.apache.hadoop.hdds.scm.exceptions.SCMException; @@ -68,7 +68,7 @@ public final class SCMContainerPlacementRackAware * for closed container placement. 
*/ public SCMContainerPlacementRackAware(final NodeManager nodeManager, - final Configuration conf, final NetworkTopology networkTopology, + final ConfigurationSource conf, final NetworkTopology networkTopology, final boolean fallback, final SCMContainerPlacementMetrics metrics) { super(nodeManager, conf); this.networkTopology = networkTopology; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java index ce5d10d4e51..4927517338c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdds.scm.container.placement.algorithms; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy; import org.apache.hadoop.hdds.scm.exceptions.SCMException; @@ -52,7 +52,7 @@ public final class SCMContainerPlacementRandom extends SCMCommonPlacementPolicy * @param conf Config */ public SCMContainerPlacementRandom(final NodeManager nodeManager, - final Configuration conf, final NetworkTopology networkTopology, + final ConfigurationSource conf, final NetworkTopology networkTopology, final boolean fallback, final SCMContainerPlacementMetrics metrics) { super(nodeManager, conf); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java index 1dc924b2575..a40a63a1dc7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java @@ -6,9 +6,9 @@ * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
* Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -18,7 +18,7 @@ package org.apache.hadoop.hdds.scm.node; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.server.events.EventHandler; @@ -30,16 +30,17 @@ public class NewNodeHandler implements EventHandler { private final PipelineManager pipelineManager; - private final Configuration conf; + private final ConfigurationSource conf; - public NewNodeHandler(PipelineManager pipelineManager, Configuration conf) { + public NewNodeHandler(PipelineManager pipelineManager, + ConfigurationSource conf) { this.pipelineManager = pipelineManager; this.conf = conf; } @Override public void onMessage(DatanodeDetails datanodeDetails, - EventPublisher publisher) { + EventPublisher publisher) { pipelineManager.triggerPipelineCreation(); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java index cacf077c6fa..b6248aa817d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java @@ -18,43 +18,47 @@ package org.apache.hadoop.hdds.scm.node; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.util.concurrent.ThreadFactoryBuilder; -import org.apache.hadoop.conf.Configuration; +import java.io.Closeable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.function.Predicate; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.events.SCMEvents; -import org.apache.hadoop.hdds.scm.node.states.*; import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap; +import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException; +import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; +import org.apache.hadoop.hdds.scm.node.states.NodeStateMap; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.server.events.Event; import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.ozone.common.statemachine - .InvalidStateTransitionException; +import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; import org.apache.hadoop.ozone.common.statemachine.StateMachine; import org.apache.hadoop.util.Time; 
import org.apache.hadoop.util.concurrent.HadoopExecutors; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.Closeable; -import java.util.*; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.function.Predicate; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DEADNODE_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_STALENODE_INTERVAL; - /** * NodeStateManager maintains the state of all the datanodes in the cluster. All * the node state change should happen only via NodeStateManager. It also @@ -144,7 +148,8 @@ private enum NodeLifeCycleEvent { * * @param conf Configuration */ - public NodeStateManager(Configuration conf, EventPublisher eventPublisher) { + public NodeStateManager(ConfigurationSource conf, + EventPublisher eventPublisher) { this.nodeStateMap = new NodeStateMap(); this.node2PipelineMap = new Node2PipelineMap(); this.eventPublisher = eventPublisher; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java index 5976c17a607..cc32f8452c7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.scm.node; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; @@ -31,7 +31,7 @@ public class NonHealthyToHealthyNodeHandler implements EventHandler { private final PipelineManager pipelineManager; - private final Configuration conf; + private final ConfigurationSource conf; public NonHealthyToHealthyNodeHandler( PipelineManager pipelineManager, OzoneConfiguration conf) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java index 26e8f5fb279..5530e7305e0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdds.scm.node; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -41,7 +41,7 @@ public class StaleNodeHandler implements EventHandler { 
private final NodeManager nodeManager; private final PipelineManager pipelineManager; - private final Configuration conf; + private final ConfigurationSource conf; public StaleNodeHandler(NodeManager nodeManager, PipelineManager pipelineManager, OzoneConfiguration conf) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java index 001d185f944..f7f1d52f9ef 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -41,11 +41,11 @@ class BackgroundPipelineCreator { private final Scheduler scheduler; private final AtomicBoolean isPipelineCreatorRunning; private final PipelineManager pipelineManager; - private final Configuration conf; + private final ConfigurationSource conf; private ScheduledFuture periodicTask; BackgroundPipelineCreator(PipelineManager pipelineManager, - Scheduler scheduler, Configuration conf) { + Scheduler scheduler, ConfigurationSource conf) { this.pipelineManager = pipelineManager; this.conf = conf; this.scheduler = scheduler; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java index ae449dcbf2f..07206943e68 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo; @@ -44,7 +44,7 @@ public class PipelineActionHandler LoggerFactory.getLogger(PipelineActionHandler.class); private final PipelineManager pipelineManager; - private final Configuration ozoneConf; + private final ConfigurationSource ozoneConf; public PipelineActionHandler(PipelineManager pipelineManager, OzoneConfiguration conf) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java index 9e5353a10ca..e1cf382d1a2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdds.scm.pipeline; import com.google.common.annotations.VisibleForTesting; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import 
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; @@ -40,7 +40,7 @@ public class PipelineFactory { private Map providers; PipelineFactory(NodeManager nodeManager, PipelineStateManager stateManager, - Configuration conf, EventPublisher eventPublisher) { + ConfigurationSource conf, EventPublisher eventPublisher) { providers = new HashMap<>(); providers.put(ReplicationType.STAND_ALONE, new SimplePipelineProvider(nodeManager, stateManager)); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java index e96b12026c3..b6a68583f38 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java @@ -20,7 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -54,7 +54,7 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy { LoggerFactory.getLogger(PipelinePlacementPolicy.class); private final NodeManager nodeManager; private final PipelineStateManager stateManager; - private final Configuration conf; + private final ConfigurationSource conf; private final int heavyNodeCriteria; /** @@ -66,7 +66,7 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy { * @param conf Configuration */ public PipelinePlacementPolicy(final NodeManager nodeManager, - final PipelineStateManager stateManager, final Configuration conf) { + final PipelineStateManager stateManager, final ConfigurationSource conf) { super(nodeManager, conf); this.nodeManager = nodeManager; this.conf = conf; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java index 1a93f225598..f45b3a9120b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java @@ -20,7 +20,7 @@ import java.io.IOException; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -50,13 +50,13 @@ public class PipelineReportHandler implements private static final Logger LOGGER = LoggerFactory.getLogger( PipelineReportHandler.class); private final PipelineManager pipelineManager; - private final Configuration conf; + private final ConfigurationSource conf; private final SafeModeManager scmSafeModeManager; private final boolean pipelineAvailabilityCheck; private final SCMPipelineMetrics metrics; public PipelineReportHandler(SafeModeManager scmSafeModeManager, - PipelineManager pipelineManager, Configuration conf) { + PipelineManager pipelineManager, ConfigurationSource conf) { 
Preconditions.checkNotNull(pipelineManager); this.scmSafeModeManager = scmSafeModeManager; this.pipelineManager = pipelineManager; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 9d7c996ed86..4d915411708 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -18,11 +18,14 @@ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.exceptions.SCMException; @@ -32,12 +35,10 @@ import org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; -import java.util.List; - /** * Implements Api for creating ratis pipelines. */ @@ -46,14 +47,14 @@ public class RatisPipelineProvider extends PipelineProvider { private static final Logger LOG = LoggerFactory.getLogger(RatisPipelineProvider.class); - private final Configuration conf; + private final ConfigurationSource conf; private final EventPublisher eventPublisher; private final PipelinePlacementPolicy placementPolicy; private int pipelineNumberLimit; private int maxPipelinePerDatanode; RatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, Configuration conf, + PipelineStateManager stateManager, ConfigurationSource conf, EventPublisher eventPublisher) { super(nodeManager, stateManager); this.conf = conf; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java index db9260e6039..5c9b202ff62 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java @@ -21,11 +21,12 @@ import java.util.List; import java.util.stream.Collectors; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; + import org.apache.ratis.client.RaftClient; import org.apache.ratis.grpc.GrpcTlsConfig; import org.apache.ratis.protocol.RaftGroup; @@ -56,7 +57,8 @@ private RatisPipelineUtils() { * @param grpcTlsConfig * @throws IOException */ - public static void 
destroyPipeline(Pipeline pipeline, Configuration ozoneConf, + public static void destroyPipeline(Pipeline pipeline, + ConfigurationSource ozoneConf, GrpcTlsConfig grpcTlsConfig) { final RaftGroup group = RatisHelper.newRaftGroup(pipeline); if (LOG.isDebugEnabled()) { @@ -82,7 +84,8 @@ public static void destroyPipeline(Pipeline pipeline, Configuration ozoneConf, * @throws IOException */ static void destroyPipeline(DatanodeDetails dn, PipelineID pipelineID, - Configuration ozoneConf, GrpcTlsConfig grpcTlsConfig) throws IOException { + ConfigurationSource ozoneConf, GrpcTlsConfig grpcTlsConfig) + throws IOException { final String rpcType = ozoneConf .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java index 26908b189d3..200b35820eb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java @@ -18,10 +18,25 @@ package org.apache.hadoop.hdds.scm.pipeline; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import javax.management.ObjectName; +import java.io.File; +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.stream.Collectors; + import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; @@ -33,34 +48,19 @@ import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.hdds.utils.MetadataKeyFilters; import org.apache.hadoop.hdds.utils.MetadataStore; import org.apache.hadoop.hdds.utils.MetadataStoreBuilder; import org.apache.hadoop.hdds.utils.Scheduler; +import org.apache.hadoop.metrics2.util.MBeans; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import javax.management.ObjectName; -import java.io.File; -import java.io.IOException; -import java.time.Duration; -import java.time.Instant; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.NavigableSet; -import java.util.Set; -import java.util.Collection; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; +import com.google.common.annotations.VisibleForTesting; +import 
com.google.common.base.Preconditions; import static org.apache.hadoop.ozone.OzoneConsts.SCM_PIPELINE_DB; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Implements api needed for management of pipelines. All the write operations @@ -82,7 +82,7 @@ public class SCMPipelineManager implements PipelineManager { private final EventPublisher eventPublisher; private final NodeManager nodeManager; private final SCMPipelineMetrics metrics; - private final Configuration conf; + private final ConfigurationSource conf; private long pipelineWaitDefaultTimeout; // Pipeline Manager MXBean private ObjectName pmInfoBean; @@ -92,7 +92,7 @@ public class SCMPipelineManager implements PipelineManager { // to prevent pipelines being created until sufficient nodes have registered. private final AtomicBoolean pipelineCreationAllowed; - public SCMPipelineManager(Configuration conf, NodeManager nodeManager, + public SCMPipelineManager(ConfigurationSource conf, NodeManager nodeManager, EventPublisher eventPublisher) throws IOException { this(conf, nodeManager, eventPublisher, null, null); @@ -102,7 +102,8 @@ public SCMPipelineManager(Configuration conf, NodeManager nodeManager, initializePipelineState(); } - protected SCMPipelineManager(Configuration conf, NodeManager nodeManager, + protected SCMPipelineManager(ConfigurationSource conf, + NodeManager nodeManager, EventPublisher eventPublisher, PipelineStateManager pipelineStateManager, PipelineFactory pipelineFactory) @@ -637,7 +638,7 @@ public void close() throws IOException { pipelineFactory.shutdown(); } - protected File getPipelineDBPath(Configuration configuration) { + protected File getPipelineDBPath(ConfigurationSource configuration) { File metaDir = ServerUtils.getScmDbDir(configuration); return new File(metaDir, SCM_PIPELINE_DB); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java index 8eadeb35543..2d7466ed133 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java @@ -24,7 +24,7 @@ import java.util.concurrent.atomic.AtomicLong; import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -51,7 +51,7 @@ public class ContainerSafeModeRule extends private AtomicLong containerWithMinReplicas = new AtomicLong(0); public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, - Configuration conf, + ConfigurationSource conf, List containers, SCMSafeModeManager manager) { super(manager, ruleName, eventQueue); safeModeCutoff = conf.getDouble( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java index 1029d711f8b..0afbd2749f7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java @@ -20,7 +20,7 @@ import java.util.HashSet; import java.util.UUID; -import 
org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer.NodeRegistrationContainerReport; @@ -42,7 +42,7 @@ public class DataNodeSafeModeRule extends private HashSet registeredDnSet; public DataNodeSafeModeRule(String ruleName, EventQueue eventQueue, - Configuration conf, + ConfigurationSource conf, SCMSafeModeManager manager) { super(manager, ruleName, eventQueue); requiredDns = conf.getInt( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java index 688125f6e6c..bd58a0633ed 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java @@ -19,20 +19,21 @@ import java.util.HashSet; import java.util.Set; -import org.apache.hadoop.conf.Configuration; + import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.hdds.server.events.TypedEvent; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Class defining Safe mode exit criteria for Pipelines. @@ -52,7 +53,7 @@ public class HealthyPipelineSafeModeRule extends SafeModeExitRule { HealthyPipelineSafeModeRule(String ruleName, EventQueue eventQueue, PipelineManager pipelineManager, - SCMSafeModeManager manager, Configuration configuration) { + SCMSafeModeManager manager, ConfigurationSource configuration) { super(manager, ruleName, eventQueue); healthyPipelinesPercent = configuration.getDouble(HddsConfigKeys. 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java index 0783d0270a9..bce4af56cc0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java @@ -19,7 +19,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.events.SCMEvents; @@ -54,7 +54,7 @@ public class OneReplicaPipelineSafeModeRule extends public OneReplicaPipelineSafeModeRule(String ruleName, EventQueue eventQueue, PipelineManager pipelineManager, - SCMSafeModeManager safeModeManager, Configuration configuration) { + SCMSafeModeManager safeModeManager, ConfigurationSource configuration) { super(safeModeManager, ruleName, eventQueue); double percent = diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java index 8537761c21e..c72d7752bc0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java @@ -17,21 +17,22 @@ */ package org.apache.hadoop.hdds.scm.safemode; -import com.google.common.annotations.VisibleForTesting; - import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.conf.Configuration; + import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; + +import com.google.common.annotations.VisibleForTesting; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -85,7 +86,7 @@ public class SCMSafeModeManager implements SafeModeManager { private Map exitRules = new HashMap(1); private Set preCheckRules = new HashSet<>(1); - private Configuration config; + private ConfigurationSource config; private static final String CONT_EXIT_RULE = "ContainerSafeModeRule"; private static final String DN_EXIT_RULE = "DataNodeSafeModeRule"; private static final String HEALTHY_PIPELINE_EXIT_RULE = @@ -101,7 +102,7 @@ public class SCMSafeModeManager implements SafeModeManager { private final SafeModeMetrics safeModeMetrics; - public SCMSafeModeManager(Configuration conf, + public SCMSafeModeManager(ConfigurationSource conf, List allContainers, PipelineManager pipelineManager, EventQueue eventQueue) { this.config = conf; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java index 
ef70b00c597..bbf8b3d5a64 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java @@ -17,7 +17,7 @@ package org.apache.hadoop.hdds.scm.safemode; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; import org.apache.hadoop.hdds.server.events.EventHandler; @@ -51,7 +51,7 @@ public class SafeModeHandler implements EventHandler { * SafeModeHandler, to handle the logic once we exit safe mode. * @param configuration */ - public SafeModeHandler(Configuration configuration) { + public SafeModeHandler(ConfigurationSource configuration) { this.waitTime = configuration.getTimeDuration( HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java index b63d04e9bcd..6a0001c673c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdds.scm.safemode; import java.util.concurrent.atomic.AtomicBoolean; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps; import org.apache.hadoop.hdds.scm.exceptions.SCMException; @@ -33,7 +33,7 @@ public class SafeModePrecheck implements Precheck { private AtomicBoolean inSafeMode; public static final String PRECHECK_TYPE = "SafeModePrecheck"; - public SafeModePrecheck(Configuration conf) { + public SafeModePrecheck(ConfigurationSource conf) { boolean safeModeEnabled = conf.getBoolean( HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 383e7ce75a4..1f2305f0791 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -21,36 +21,33 @@ */ package org.apache.hadoop.hdds.scm.server; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.protobuf.BlockingService; - +import javax.management.ObjectName; +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; import java.util.Objects; -import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeUnit; + import org.apache.hadoop.conf.Configuration; 
import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; -import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.block.BlockManager; import org.apache.hadoop.hdds.scm.block.BlockManagerImpl; import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl; import org.apache.hadoop.hdds.scm.block.PendingDeleteHandler; -import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementMetrics; -import org.apache.hadoop.hdds.scm.net.NetworkTopology; -import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; -import org.apache.hadoop.hdds.scm.safemode.SafeModeHandler; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler; import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler; import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler; @@ -59,27 +56,33 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.container.ContainerReportHandler; import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler; +import org.apache.hadoop.hdds.scm.container.ReplicationManager; +import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration; import org.apache.hadoop.hdds.scm.container.SCMContainerManager; -import org.apache.hadoop.hdds.scm.PlacementPolicy; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementMetrics; import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics; -import org.apache.hadoop.hdds.scm.container.ReplicationManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreRDBImpl; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.node.DeadNodeHandler; import org.apache.hadoop.hdds.scm.node.NewNodeHandler; -import org.apache.hadoop.hdds.scm.node.NonHealthyToHealthyNodeHandler; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.node.NodeReportHandler; +import org.apache.hadoop.hdds.scm.node.NonHealthyToHealthyNodeHandler; import org.apache.hadoop.hdds.scm.node.SCMNodeManager; import org.apache.hadoop.hdds.scm.node.StaleNodeHandler; import org.apache.hadoop.hdds.scm.pipeline.PipelineActionHandler; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler; import 
org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; +import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; +import org.apache.hadoop.hdds.scm.safemode.SafeModeHandler; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer; @@ -88,6 +91,9 @@ import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdds.utils.HddsServerUtil; +import org.apache.hadoop.hdds.utils.HddsVersionInfo; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.RPC; @@ -104,23 +110,17 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.JvmPauseMonitor; -import org.apache.hadoop.hdds.utils.HddsVersionInfo; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.RemovalListener; +import com.google.protobuf.BlockingService; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT; import org.apache.ratis.grpc.GrpcTlsConfig; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.management.ObjectName; -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.util.Collection; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT; - /** * StorageContainerManager is the main entry point for the service that * provides information about @@ -505,7 +505,7 @@ private void initalizeMetadataStore(OzoneConfiguration conf, * * @param conf */ - private void loginAsSCMUser(Configuration conf) + private void loginAsSCMUser(ConfigurationSource conf) throws IOException, AuthenticationException { if (LOG.isDebugEnabled()) { ScmConfig scmConfig = configuration.getObject(ScmConfig.class); @@ -515,18 +515,20 @@ private void loginAsSCMUser(Configuration conf) scmConfig.getKerberosKeytab()); } - if (SecurityUtil.getAuthenticationMethod(conf).equals( + Configuration hadoopConf = + LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); + if (SecurityUtil.getAuthenticationMethod(hadoopConf).equals( AuthenticationMethod.KERBEROS)) { - UserGroupInformation.setConfiguration(conf); + UserGroupInformation.setConfiguration(hadoopConf); InetSocketAddress socAddr = HddsServerUtil .getScmBlockClientBindAddress(conf); - SecurityUtil.login(conf, + SecurityUtil.login(hadoopConf, ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY, ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY, socAddr.getHostName()); } else { throw new AuthenticationException(SecurityUtil.getAuthenticationMethod( - conf) + " authentication method not support. " + hadoopConf) + " authentication method not support. 
" + "SCM user login failed."); } LOG.info("SCM login successful."); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java index 3f963fd2a39..80b3eb9f0e6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java @@ -17,18 +17,18 @@ package org.apache.hadoop.hdds.scm.server; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.server.http.BaseHttpServer; -import java.io.IOException; - /** * HttpServer2 wrapper for the Ozone Storage Container Manager. */ public class StorageContainerManagerHttpServer extends BaseHttpServer { - public StorageContainerManagerHttpServer(Configuration conf) + public StorageContainerManagerHttpServer(ConfigurationSource conf) throws IOException { super(conf, "scm"); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java index 843586c6886..b3cb668bb62 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java @@ -19,20 +19,18 @@ package org.apache.hadoop.hdds.scm; -import org.apache.hadoop.conf.Configuration; +import java.net.InetSocketAddress; + import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.HddsServerUtil; +import static org.hamcrest.core.Is.is; +import static org.junit.Assert.assertThat; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.junit.rules.Timeout; -import java.net.InetSocketAddress; - -import static org.hamcrest.core.Is.is; -import static org.junit.Assert.assertThat; - /** * Test the HDDS server side utilities. */ @@ -50,7 +48,7 @@ public class TestHddsServerUtil { */ @Test public void testMissingScmDataNodeAddress() { - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); thrown.expect(IllegalArgumentException.class); HddsServerUtil.getScmAddressForDataNodes(conf); } @@ -62,7 +60,7 @@ public void testMissingScmDataNodeAddress() { */ @Test public void testGetScmDataNodeAddress() { - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); // First try a client address with just a host name. Verify it falls // back to the default port. @@ -110,7 +108,7 @@ public void testGetScmDataNodeAddress() { */ @Test public void testScmClientBindHostDefault() { - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); // The bind host should be 0.0.0.0 unless OZONE_SCM_CLIENT_BIND_HOST_KEY // is set differently. 
@@ -156,7 +154,7 @@ public void testScmClientBindHostDefault() { */ @Test public void testScmDataNodeBindHostDefault() { - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); // The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY // is set differently. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java index d4cc5a4db40..56d265a8586 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java @@ -18,7 +18,10 @@ package org.apache.hadoop.hdds.scm; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.net.InetSocketAddress; +import java.util.concurrent.TimeUnit; + import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.ServerUtils; @@ -26,6 +29,12 @@ import org.apache.hadoop.test.PathUtils; import org.apache.commons.io.FileUtils; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import org.junit.Rule; import org.junit.Test; @@ -34,17 +43,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.File; -import java.net.InetSocketAddress; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.junit.Assert.assertEquals; - /** * Unit tests for {@link HddsServerUtil}. 
*/ @@ -65,7 +63,7 @@ public class TestHddsServerUtils { @SuppressWarnings("StringSplitter") public void testGetDatanodeAddressWithPort() { final String scmHost = "host123:100"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost); final InetSocketAddress address = HddsServerUtil.getScmAddressForDataNodes(conf); @@ -79,7 +77,7 @@ public void testGetDatanodeAddressWithPort() { @Test public void testGetDatanodeAddressWithoutPort() { final String scmHost = "host123"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost); final InetSocketAddress address = HddsServerUtil.getScmAddressForDataNodes(conf); @@ -94,7 +92,7 @@ public void testGetDatanodeAddressWithoutPort() { @Test public void testDatanodeAddressFallbackToClientNoPort() { final String scmHost = "host123"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost); final InetSocketAddress address = HddsServerUtil.getScmAddressForDataNodes(conf); @@ -111,7 +109,7 @@ public void testDatanodeAddressFallbackToClientNoPort() { @SuppressWarnings("StringSplitter") public void testDatanodeAddressFallbackToClientWithPort() { final String scmHost = "host123:100"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost); final InetSocketAddress address = HddsServerUtil.getScmAddressForDataNodes(conf); @@ -126,7 +124,7 @@ public void testDatanodeAddressFallbackToClientWithPort() { @Test public void testDatanodeAddressFallbackToScmNamesNoPort() { final String scmHost = "host123"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); final InetSocketAddress address = HddsServerUtil.getScmAddressForDataNodes(conf); @@ -143,7 +141,7 @@ public void testDatanodeAddressFallbackToScmNamesNoPort() { @SuppressWarnings("StringSplitter") public void testDatanodeAddressFallbackToScmNamesWithPort() { final String scmHost = "host123:100"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); final InetSocketAddress address = HddsServerUtil.getScmAddressForDataNodes(conf); @@ -158,7 +156,7 @@ public void testDatanodeAddressFallbackToScmNamesWithPort() { @Test public void testClientFailsWithMultipleScmNames() { final String scmHost = "host123,host456"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); thrown.expect(IllegalArgumentException.class); HddsServerUtil.getScmAddressForDataNodes(conf); @@ -172,7 +170,7 @@ public void testGetScmDbDir() { final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class); final File dbDir = new File(testDir, "scmDbDir"); final File metaDir = new File(testDir, "metaDir"); // should be ignored. 
- final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); @@ -192,7 +190,7 @@ public void testGetScmDbDir() { public void testGetScmDbDirWithFallback() { final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class); final File metaDir = new File(testDir, "metaDir"); - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); try { assertEquals(metaDir, ServerUtils.getScmDbDir(conf)); @@ -210,7 +208,7 @@ public void testNoScmDbDirConfigured() { @Test public void testGetStaleNodeInterval() { - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); // Reset OZONE_SCM_STALENODE_INTERVAL to 300s that // larger than max limit value. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java index 03768644710..24c144979c5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java @@ -26,8 +26,8 @@ import java.util.Arrays; import java.util.Collection; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer; import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.http.HttpConfig; @@ -54,7 +54,7 @@ public class TestStorageContainerManagerHttpServer { .getTempPath(TestStorageContainerManagerHttpServer.class.getSimpleName()); private static String keystoresDir; private static String sslConfDir; - private static Configuration conf; + private static OzoneConfiguration conf; private static URLConnectionFactory connectionFactory; @Parameters public static Collection policy() { @@ -76,7 +76,7 @@ public TestStorageContainerManagerHttpServer(Policy policy) { File base = new File(BASEDIR); FileUtil.fullyDelete(base); base.mkdirs(); - conf = new Configuration(); + conf = new OzoneConfiguration(); keystoresDir = new File(BASEDIR).getAbsolutePath(); sslConfDir = KeyStoreTestUtil.getClasspathDir( TestStorageContainerManagerHttpServer.class); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java index 88917522de6..f5675006cc9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java @@ -17,11 +17,13 @@ package org.apache.hadoop.hdds.scm.container; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.io.IOException; + import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import 
org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -33,25 +35,23 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.test.GenericTestUtils; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import java.io.File; -import java.io.IOException; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT; +import org.apache.commons.lang3.RandomUtils; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT; import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER; import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; /** * Tests the closeContainerEventHandler class. */ public class TestCloseContainerEventHandler { - private static Configuration configuration; + private static OzoneConfiguration configuration; private static MockNodeManager nodeManager; private static SCMPipelineManager pipelineManager; private static SCMContainerManager containerManager; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java index 41585bc8f7d..c7ec835e55b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java @@ -16,7 +16,7 @@ */ package org.apache.hadoop.hdds.scm.container; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -59,7 +59,7 @@ public class TestContainerReportHandler { @Before public void setup() throws IOException { - final Configuration conf = new OzoneConfiguration(); + final ConfigurationSource conf = new OzoneConfiguration(); this.nodeManager = new MockNodeManager(true, 10); this.containerManager = Mockito.mock(ContainerManager.class); this.containerStateManager = new ContainerStateManager(conf); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java index 7961ba8ec74..3c9e7b67f64 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hdds.scm.container; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -55,7 +55,7 @@ public class 
TestIncrementalContainerReportHandler { @Before public void setup() throws IOException { - final Configuration conf = new OzoneConfiguration(); + final ConfigurationSource conf = new OzoneConfiguration(); this.containerManager = Mockito.mock(ContainerManager.class); this.nodeManager = Mockito.mock(NodeManager.class); this.containerStateManager = new ContainerStateManager(conf); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java index 87d76558d27..9a48dbbe158 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hdds.scm.container; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; @@ -71,7 +71,7 @@ public class TestReplicationManager { @Before public void setup() throws IOException, InterruptedException { - final Configuration conf = new OzoneConfiguration(); + final ConfigurationSource conf = new OzoneConfiguration(); final ContainerManager containerManager = Mockito.mock(ContainerManager.class); eventQueue = new EventQueue(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java index ac4e8d17a0c..75d27124ee8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java @@ -16,23 +16,41 @@ */ package org.apache.hadoop.hdds.scm.container; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import java.util.Random; +import java.util.Set; +import java.util.TreeSet; +import java.util.UUID; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.protocol.proto - 
.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.test.GenericTestUtils; + import org.junit.AfterClass; import org.junit.Assert; import org.junit.Before; @@ -41,25 +59,6 @@ import org.junit.Test; import org.junit.rules.ExpectedException; -import java.io.File; -import java.io.IOException; -import java.util.Random; -import java.util.Set; -import java.util.TreeSet; -import java.util.UUID; -import java.util.Iterator; -import java.util.Optional; -import java.util.List; -import java.util.ArrayList; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -import java.util.stream.Collectors; -import java.util.stream.IntStream; - /** * Tests for Container ContainerManager. */ @@ -79,7 +78,7 @@ public class TestSCMContainerManager { public ExpectedException thrown = ExpectedException.none(); @BeforeClass public static void setUp() throws Exception { - Configuration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(); testDir = GenericTestUtils .getTestDir(TestSCMContainerManager.class.getSimpleName()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java index 9c11caa5b5d..1c2cdd0a8f0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java @@ -23,7 +23,7 @@ import java.io.IOException; import java.util.Iterator; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; @@ -57,7 +57,7 @@ public class TestUnknownContainerReport { @Before public void setup() throws IOException { - final Configuration conf = new OzoneConfiguration(); + final ConfigurationSource conf = new OzoneConfiguration(); this.nodeManager = new MockNodeManager(true, 10); this.containerManager = Mockito.mock(ContainerManager.class); this.containerStateManager = new ContainerStateManager(conf); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java index a454de2672a..a0d584641b3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java @@ -16,7 +16,10 @@ */ package org.apache.hadoop.hdds.scm.container.placement.algorithms; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + import 
org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -30,19 +33,15 @@ import org.apache.hadoop.hdds.scm.net.NodeSchema; import org.apache.hadoop.hdds.scm.net.NodeSchemaManager; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.List; import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; import static org.mockito.Matchers.anyObject; +import org.mockito.Mockito; import static org.mockito.Mockito.when; /** @@ -56,7 +55,7 @@ public class TestContainerPlacementFactory { // node storage capacity private final long storageCapacity = 100L; // configuration - private Configuration conf; + private OzoneConfiguration conf; // node manager private NodeManager nodeManager; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java index ddca0fa84ff..afefc9ac9f6 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java @@ -21,7 +21,7 @@ import java.util.List; import java.util.Map; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -43,7 +43,7 @@ public class TestSCMContainerPlacementCapacity { @Test public void chooseDatanodes() throws SCMException { //given - Configuration conf = new OzoneConfiguration(); + ConfigurationSource conf = new OzoneConfiguration(); List datanodes = new ArrayList<>(); for (int i = 0; i < 7; i++) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java index 992f1c5b174..f659b7a850e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java @@ -21,7 +21,7 @@ import java.util.Collection; import java.util.List; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -60,7 +60,7 @@ @RunWith(Parameterized.class) public class TestSCMContainerPlacementRackAware { private NetworkTopology cluster; - private Configuration conf; + 
private ConfigurationSource conf; private NodeManager nodeManager; private Integer datanodeCount; private List datanodes = new ArrayList<>(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java index 91509a02a9e..797624697a9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java @@ -19,7 +19,7 @@ import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -42,7 +42,7 @@ public class TestSCMContainerPlacementRandom { @Test public void chooseDatanodes() throws SCMException { //given - Configuration conf = new OzoneConfiguration(); + ConfigurationSource conf = new OzoneConfiguration(); List datanodes = new ArrayList<>(); for (int i = 0; i < 5; i++) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index 88085467c5d..de027ed1fb4 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -18,20 +18,23 @@ package org.apache.hadoop.hdds.scm.node; -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.io.IOException; +import java.util.List; +import java.util.concurrent.TimeoutException; + import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.SCMContainerManager; -import org.apache.hadoop.hdds.scm.PlacementPolicy; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; @@ -40,25 +43,18 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.test.PathUtils; + +import org.apache.commons.io.IOUtils; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; +import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; +import static org.junit.Assert.assertEquals; import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; import org.mockito.Mockito; -import java.io.File; -import java.io.IOException; -import java.util.List; -import java.util.concurrent.TimeoutException; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys - .OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; -import static org.junit.Assert.assertEquals; - /** * Test for different container placement policy. */ @@ -101,7 +97,7 @@ SCMNodeManager createNodeManager(OzoneConfiguration config) return nodeManager; } - SCMContainerManager createContainerManager(Configuration config, + SCMContainerManager createContainerManager(ConfigurationSource config, NodeManager scmNodeManager) throws IOException { EventQueue eventQueue = new EventQueue(); final int cacheSize = config.getInt(OZONE_SCM_DB_CACHE_SIZE_MB, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java index 340ebf59087..f9fb1505304 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java @@ -18,16 +18,16 @@ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; +import java.util.List; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; -import java.io.IOException; -import java.util.List; - /** * Mock Ratis Pipeline Provider for Mock Nodes. 
*/ @@ -37,27 +37,27 @@ public class MockRatisPipelineProvider extends RatisPipelineProvider { private boolean isHealthy; public MockRatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, Configuration conf, + PipelineStateManager stateManager, ConfigurationSource conf, EventPublisher eventPublisher, boolean autoOpen) { super(nodeManager, stateManager, conf, eventPublisher); autoOpenPipeline = autoOpen; } public MockRatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, - Configuration conf) { + PipelineStateManager stateManager, + ConfigurationSource conf) { super(nodeManager, stateManager, conf, new EventQueue()); } public MockRatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, - Configuration conf, boolean isHealthy) { + PipelineStateManager stateManager, + ConfigurationSource conf, boolean isHealthy) { super(nodeManager, stateManager, conf, new EventQueue()); this.isHealthy = isHealthy; } public MockRatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, Configuration conf, + PipelineStateManager stateManager, ConfigurationSource conf, EventPublisher eventPublisher) { super(nodeManager, stateManager, conf, eventPublisher); autoOpenPipeline = true; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java index fafc4b0acec..b8b86227fbb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java @@ -18,7 +18,13 @@ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.conf.Configuration; +import java.util.ArrayList; +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import java.util.stream.Collectors; + import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; @@ -26,19 +32,22 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.net.*; +import org.apache.hadoop.hdds.scm.net.NetConstants; +import org.apache.hadoop.hdds.scm.net.NetworkTopology; +import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; +import org.apache.hadoop.hdds.scm.net.Node; +import org.apache.hadoop.hdds.scm.net.NodeImpl; +import org.apache.hadoop.hdds.scm.net.NodeSchema; +import org.apache.hadoop.hdds.scm.net.NodeSchemaManager; import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.util.*; -import java.util.stream.Collectors; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; /** * Test for PipelinePlacementPolicy. 
@@ -190,7 +199,8 @@ public void testRackAwarenessNotEnabledWithFallBack() throws SCMException{ Assert.assertTrue(anchor.getNetworkLocation().equals( randomNode.getNetworkLocation())); - NetworkTopology topology = new NetworkTopologyImpl(new Configuration()); + NetworkTopology topology = + new NetworkTopologyImpl(new OzoneConfiguration()); DatanodeDetails nextNode = placementPolicy.chooseNodeBasedOnRackAwareness( nodesWithOutRackAwareness, new ArrayList<>( PIPELINE_PLACEMENT_MAX_NODES_COUNT), topology, anchor); @@ -229,7 +239,8 @@ public void testRackAwarenessNotEnabledWithFallBack() throws SCMException{ }; private NetworkTopology createNetworkTopologyOnDifRacks() { - NetworkTopology topology = new NetworkTopologyImpl(new Configuration()); + NetworkTopology topology = + new NetworkTopologyImpl(new OzoneConfiguration()); for (Node n : NODES) { topology.add(n); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java index 0ab7083f07f..fcb1c948648 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java @@ -18,13 +18,6 @@ package org.apache.hadoop.hdds.scm.pipeline; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT; -import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; -import static org.apache.hadoop.test.MetricsAsserts.getMetrics; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - import java.io.File; import java.io.IOException; import java.util.ArrayList; @@ -35,25 +28,30 @@ import java.util.concurrent.TimeoutException; import java.util.stream.Collectors; -import com.google.common.base.Supplier; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher - .PipelineReportFromDatanode; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.test.GenericTestUtils; + +import com.google.common.base.Supplier; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT; +import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; import org.junit.After; import org.junit.Assert; +import static org.junit.Assert.assertEquals; 
+import static org.junit.Assert.fail; import org.junit.Before; import org.junit.Test; @@ -63,7 +61,7 @@ public class TestSCMPipelineManager { private static MockNodeManager nodeManager; private static File testDir; - private static Configuration conf; + private static OzoneConfiguration conf; @Before public void setUp() throws Exception { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index 8966cd8bcd1..0620883520b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -17,10 +17,6 @@ */ package org.apache.hadoop.hdds.scm.safemode; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - import java.io.File; import java.util.ArrayList; import java.util.Collections; @@ -29,7 +25,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -37,8 +32,8 @@ import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.pipeline.*; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; @@ -47,7 +42,11 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.test.GenericTestUtils; + import org.junit.Assert; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -61,7 +60,7 @@ public class TestSCMSafeModeManager { private static EventQueue queue; private SCMSafeModeManager scmSafeModeManager; - private static Configuration config; + private static OzoneConfiguration config; private List containers = Collections.emptyList(); @Rule diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java index bd89b88c488..663ac8c5b80 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java @@ -22,7 +22,6 @@ import java.util.Map; import java.util.UUID; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -83,7 +82,7 @@ public class TestEndPoint { private static RPC.Server scmServer; private static ScmTestMock scmServerImpl; private static File testDir; - private static Configuration config; + private static OzoneConfiguration config; @AfterClass 
public static void tearDown() throws Exception { @@ -292,7 +291,7 @@ private StorageReportProto getStorageReports(UUID id) { private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress, int rpcTimeout, boolean clearDatanodeDetails) throws Exception { - Configuration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(); EndpointStateMachine rpcEndPoint = createEndpoint(conf, scmAddress, rpcTimeout); @@ -453,7 +452,7 @@ private void addScmCommands() { private StateContext heartbeatTaskHelper(InetSocketAddress scmAddress, int rpcTimeout) throws Exception { - Configuration conf = SCMTestUtils.getConf(); + OzoneConfiguration conf = SCMTestUtils.getConf(); conf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath()); conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath()); // Mini Ozone cluster will not come up if the port is not true, since diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java index f0b1cbb146b..a0cf9574d58 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java @@ -16,27 +16,24 @@ */ package org.apache.hadoop.ozone.container.placement; -import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics; -import org.apache.hadoop.conf.Configuration; +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementCapacity; -import org.apache.hadoop.hdds.scm.container.placement.algorithms - .SCMContainerPlacementRandom; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; +import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.ozone.OzoneConsts; -import org.junit.Assert; -import org.junit.Test; - -import java.util.ArrayList; -import java.util.List; -import java.util.Random; -import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState - .HEALTHY; +import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY; +import org.junit.Assert; import static org.junit.Assert.assertEquals; +import org.junit.Test; /** * Asserts that allocation strategy works as expected. 
@@ -80,10 +77,11 @@ public void testCapacityPlacementYieldsBetterDataDistribution() throws .getStandardDeviation(), 0.001); SCMContainerPlacementCapacity capacityPlacer = new - SCMContainerPlacementCapacity(nodeManagerCapacity, new Configuration(), + SCMContainerPlacementCapacity(nodeManagerCapacity, + new OzoneConfiguration(), null, true, null); SCMContainerPlacementRandom randomPlacer = new - SCMContainerPlacementRandom(nodeManagerRandom, new Configuration(), + SCMContainerPlacementRandom(nodeManagerRandom, new OzoneConfiguration(), null, true, null); for (int x = 0; x < opsCount; x++) { diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 62bb385f1a0..1607f6322ad 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto; @@ -42,6 +43,7 @@ import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; @@ -51,9 +53,9 @@ import com.google.common.base.Preconditions; import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; -import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT; +import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -76,7 +78,7 @@ public XceiverClientManager getXceiverClientManager() { private final XceiverClientManager xceiverClientManager; - public ContainerOperationClient(Configuration conf) throws IOException { + public ContainerOperationClient(OzoneConfiguration conf) throws IOException { storageContainerLocationClient = newContainerRpcClient(conf); this.xceiverClientManager = newXCeiverClientManager(conf); containerSizeB = (int) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE, @@ -93,7 +95,7 @@ public ContainerOperationClient(Configuration conf) throws IOException { } } - private XceiverClientManager newXCeiverClientManager(Configuration conf) + private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf) throws IOException { XceiverClientManager manager; if (OzoneSecurityUtil.isSecurityEnabled(conf)) { @@ -112,14 +114,15 @@ private XceiverClientManager newXCeiverClientManager(Configuration conf) } public static StorageContainerLocationProtocol newContainerRpcClient( - Configuration conf) throws IOException { + ConfigurationSource configSource) throws IOException { Class protocol = StorageContainerLocationProtocolPB.class; - + Configuration conf = + 
LegacyHadoopConfigurationSource.asHadoopConfiguration(configSource); RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class); long version = RPC.getProtocolVersion(protocol); - InetSocketAddress scmAddress = getScmAddressForClients(conf); + InetSocketAddress scmAddress = getScmAddressForClients(configSource); UserGroupInformation user = UserGroupInformation.getCurrentUser(); SocketFactory socketFactory = NetUtils.getDefaultSocketFactory(conf); int rpcTimeOut = Client.getRpcTimeout(conf); @@ -131,7 +134,7 @@ public static StorageContainerLocationProtocol newContainerRpcClient( StorageContainerLocationProtocolClientSideTranslatorPB client = new StorageContainerLocationProtocolClientSideTranslatorPB(rpcProxy); return TracingUtil.createProxy( - client, StorageContainerLocationProtocol.class, conf); + client, StorageContainerLocationProtocol.class, configSource); } @Override diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java index b5fcd18961e..b3efbda178a 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java @@ -26,7 +26,7 @@ import java.util.NoSuchElementException; import java.util.Objects; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.tracing.TracingUtil; @@ -67,7 +67,7 @@ public class ObjectStore { * @param conf Configuration object. * @param proxy ClientProtocol proxy. */ - public ObjectStore(Configuration conf, ClientProtocol proxy) { + public ObjectStore(ConfigurationSource conf, ClientProtocol proxy) { this.proxy = TracingUtil.createProxy(proxy, ClientProtocol.class, conf); this.listCacheSize = HddsClientUtils.getListCacheSize(conf); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java index e766ebd4e03..87710ea0115 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java @@ -21,7 +21,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.StorageType; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -104,7 +104,7 @@ public class OzoneBucket extends WithMetadata { private OzoneObj ozoneObj; - private OzoneBucket(Configuration conf, String volumeName, + private OzoneBucket(ConfigurationSource conf, String volumeName, String bucketName, ReplicationFactor defaultReplication, ReplicationType defaultReplicationType, ClientProtocol proxy) { Preconditions.checkNotNull(proxy, "Client proxy is not set."); @@ -133,7 +133,7 @@ private OzoneBucket(Configuration conf, String volumeName, .setStoreType(OzoneObj.StoreType.OZONE).build(); } @SuppressWarnings("parameternumber") - public OzoneBucket(Configuration conf, ClientProtocol proxy, + public OzoneBucket(ConfigurationSource conf, ClientProtocol 
proxy, String volumeName, String bucketName, StorageType storageType, Boolean versioning, long creationTime, Map metadata, String encryptionKeyName) { @@ -157,7 +157,7 @@ public OzoneBucket(Configuration conf, ClientProtocol proxy, * @param creationTime creation time of the bucket. */ @SuppressWarnings("parameternumber") - public OzoneBucket(Configuration conf, ClientProtocol proxy, + public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy, String volumeName, String bucketName, StorageType storageType, Boolean versioning, long creationTime, Map metadata) { this(conf, volumeName, bucketName, null, null, proxy); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java index 0d65d73fc3b..75ca50df95f 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClient.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.client; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import java.io.Closeable; @@ -81,7 +81,7 @@ public class OzoneClient implements Closeable { * @param conf Configuration object * @param proxy ClientProtocol proxy instance */ - public OzoneClient(Configuration conf, ClientProtocol proxy) { + public OzoneClient(ConfigurationSource conf, ClientProtocol proxy) { this.proxy = proxy; this.objectStore = new ObjectStore(conf, this.proxy); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java index b0a7a4e3930..8e4882a3712 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneClientFactory.java @@ -23,20 +23,20 @@ import java.io.IOException; import java.lang.reflect.Proxy; -import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.client.rpc.RpcClient; +import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; +import org.apache.hadoop.security.token.Token; import com.google.common.base.Preconditions; +import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; - -import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; -import org.apache.hadoop.security.token.Token; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -83,7 +83,7 @@ public static OzoneClient getRpcClient() throws IOException { * @throws IOException */ public static OzoneClient getRpcClient(String omHost, Integer omRpcPort, - Configuration config) + ConfigurationSource config) throws IOException { Preconditions.checkNotNull(omHost); Preconditions.checkNotNull(omRpcPort); @@ -106,7 +106,7 @@ public static OzoneClient getRpcClient(String omHost, Integer omRpcPort, * @throws IOException */ public static OzoneClient 
getRpcClient(String omServiceId, - Configuration config) throws IOException { + ConfigurationSource config) throws IOException { Preconditions.checkNotNull(omServiceId); Preconditions.checkNotNull(config); if (OmUtils.isOmHAServiceId(config, omServiceId)) { @@ -129,7 +129,7 @@ public static OzoneClient getRpcClient(String omServiceId, * * @throws IOException */ - public static OzoneClient getRpcClient(Configuration config) + public static OzoneClient getRpcClient(ConfigurationSource config) throws IOException { Preconditions.checkNotNull(config); @@ -156,7 +156,7 @@ public static OzoneClient getRpcClient(Configuration config) * Configuration to be used for OzoneClient creation */ private static OzoneClient getRpcClient(ClientProtocol clientProtocol, - Configuration config) { + ConfigurationSource config) { OzoneClientInvocationHandler clientHandler = new OzoneClientInvocationHandler(clientProtocol); ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance( @@ -225,7 +225,7 @@ public static OzoneClient getOzoneClient(Configuration conf, * * @throws IOException */ - private static ClientProtocol getClientProtocol(Configuration config) + private static ClientProtocol getClientProtocol(ConfigurationSource config) throws IOException { return getClientProtocol(config, null); } @@ -241,7 +241,7 @@ private static ClientProtocol getClientProtocol(Configuration config) * * @throws IOException */ - private static ClientProtocol getClientProtocol(Configuration config, + private static ClientProtocol getClientProtocol(ConfigurationSource config, String omServiceId) throws IOException { try { return new RpcClient(config, omServiceId); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index cca923ad222..3b4dedecf2e 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -26,7 +26,7 @@ import java.util.Map; import java.util.NoSuchElementException; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.ozone.OzoneAcl; @@ -88,10 +88,11 @@ public class OzoneVolume extends WithMetadata { * @param metadata custom key value metadata. 
*/ @SuppressWarnings("parameternumber") - public OzoneVolume(Configuration conf, ClientProtocol proxy, String name, - String admin, String owner, long quotaInBytes, - long creationTime, List acls, - Map metadata) { + public OzoneVolume(ConfigurationSource conf, ClientProtocol proxy, + String name, + String admin, String owner, long quotaInBytes, + long creationTime, List acls, + Map metadata) { Preconditions.checkNotNull(proxy, "Client proxy is not set."); this.proxy = proxy; this.name = name; @@ -105,9 +106,10 @@ public OzoneVolume(Configuration conf, ClientProtocol proxy, String name, } @SuppressWarnings("parameternumber") - public OzoneVolume(Configuration conf, ClientProtocol proxy, String name, - String admin, String owner, long quotaInBytes, - long creationTime, List acls) { + public OzoneVolume(ConfigurationSource conf, ClientProtocol proxy, + String name, + String admin, String owner, long quotaInBytes, + long creationTime, List acls) { this(conf, proxy, name, admin, owner, quotaInBytes, creationTime, acls, new HashMap<>()); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java index 6be77709d4b..84c341df6ee 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/OzoneKMSUtil.java @@ -18,6 +18,12 @@ package org.apache.hadoop.ozone.client.rpc; +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.security.GeneralSecurityException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.CipherSuite; import org.apache.hadoop.crypto.CryptoCodec; @@ -26,6 +32,8 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion; import org.apache.hadoop.fs.FileEncryptionInfo; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.io.Text; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -33,12 +41,6 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.KMSUtil; -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.net.URI; -import java.nio.charset.StandardCharsets; -import java.security.GeneralSecurityException; - /** * KMS utility class for Ozone Data Encryption At-Rest. 
*/ @@ -94,7 +96,7 @@ private static String bytes2String(byte[] bytes, int offset, int length) { } public static URI getKeyProviderUri(UserGroupInformation ugi, - URI namespaceUri, String kmsUriSrv, Configuration conf) + URI namespaceUri, String kmsUriSrv, ConfigurationSource conf) throws IOException { URI keyProviderUri = null; Credentials credentials = ugi.getCredentials(); @@ -110,8 +112,10 @@ public static URI getKeyProviderUri(UserGroupInformation ugi, if (keyProviderUri == null) { // from client conf if (kmsUriSrv == null) { + Configuration hadoopConfig = + LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); keyProviderUri = KMSUtil.getKeyProviderUri( - conf, keyProviderUriKeyName); + hadoopConfig, keyProviderUriKeyName); } else if (!kmsUriSrv.isEmpty()) { // from om server keyProviderUri = URI.create(kmsUriSrv); @@ -126,12 +130,14 @@ public static URI getKeyProviderUri(UserGroupInformation ugi, return keyProviderUri; } - public static KeyProvider getKeyProvider(final Configuration conf, + public static KeyProvider getKeyProvider(final ConfigurationSource conf, final URI serverProviderUri) throws IOException{ if (serverProviderUri == null) { throw new IOException("KMS serverProviderUri is not configured."); } - return KMSUtil.createKeyProviderFromUri(conf, serverProviderUri); + return KMSUtil.createKeyProviderFromUri( + LegacyHadoopConfigurationSource.asHadoopConfiguration(conf), + serverProviderUri); } public static CryptoProtocolVersion getCryptoProtocolVersion( @@ -156,14 +162,16 @@ public static void checkCryptoProtocolVersion( } } - public static CryptoCodec getCryptoCodec(Configuration conf, + public static CryptoCodec getCryptoCodec(ConfigurationSource conf, FileEncryptionInfo feInfo) throws IOException { CipherSuite suite = feInfo.getCipherSuite(); if (suite.equals(CipherSuite.UNKNOWN)) { throw new IOException("NameNode specified unknown CipherSuite with ID " + suite.getUnknownValue() + ", cannot instantiate CryptoCodec."); } else { - CryptoCodec codec = CryptoCodec.getInstance(conf, suite); + Configuration hadoopConfig = + LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); + CryptoCodec codec = CryptoCodec.getInstance(hadoopConfig, suite); if (codec == null) { throw new OMException("No configuration found for the cipher suite " + suite.getConfigSuffix() + " prefixed with " + diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 5836017969c..13821178445 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -18,29 +18,54 @@ package org.apache.hadoop.ozone.client.rpc; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; +import javax.crypto.Cipher; +import javax.crypto.CipherInputStream; +import javax.crypto.CipherOutputStream; +import java.io.IOException; +import java.net.URI; +import java.security.InvalidKeyException; +import java.security.SecureRandom; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.function.Function; +import java.util.stream.Collectors; + import org.apache.hadoop.crypto.CryptoInputStream; import org.apache.hadoop.crypto.CryptoOutputStream; 
import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.fs.FileEncryptionInfo; +import org.apache.hadoop.hdds.client.OzoneQuota; +import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.StorageType; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos - .ChecksumType; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.io.Text; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.ozone.client.*; -import org.apache.hadoop.hdds.client.OzoneQuota; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.ozone.client.BucketArgs; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.client.OzoneKeyLocation; +import org.apache.hadoop.ozone.client.OzoneMultipartUpload; +import org.apache.hadoop.ozone.client.OzoneMultipartUploadList; +import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; +import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.io.KeyInputStream; import org.apache.hadoop.ozone.client.io.KeyOutputStream; @@ -69,12 +94,7 @@ import org.apache.hadoop.ozone.om.helpers.ServiceInfo; import org.apache.hadoop.ozone.om.helpers.ServiceInfoEx; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; -import org.apache.hadoop.ozone.om.protocolPB - .OzoneManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.OzoneAcl; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRoleInfo; import org.apache.hadoop.ozone.security.GDPRSymmetricKey; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; @@ -85,25 +105,15 @@ import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.io.Text; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import org.apache.logging.log4j.util.Strings; import org.apache.ratis.protocol.ClientId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import javax.crypto.Cipher; -import javax.crypto.CipherInputStream; -import javax.crypto.CipherOutputStream; -import 
java.io.IOException; -import java.net.URI; -import java.security.InvalidKeyException; -import java.security.SecureRandom; -import java.util.*; -import java.util.function.Function; -import java.util.stream.Collectors; - -import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; - /** * Ozone RPC Client Implementation, it connects to OM, SCM and DataNode * to execute client calls. This uses RPC protocol for communication @@ -114,7 +124,7 @@ public class RpcClient implements ClientProtocol { private static final Logger LOG = LoggerFactory.getLogger(RpcClient.class); - private final OzoneConfiguration conf; + private final ConfigurationSource conf; private final OzoneManagerProtocol ozoneManagerClient; private final XceiverClientManager xceiverClientManager; private final int chunkSize; @@ -140,9 +150,10 @@ public class RpcClient implements ClientProtocol { * @param omServiceId OM HA Service ID, set this to null if not HA * @throws IOException */ - public RpcClient(Configuration conf, String omServiceId) throws IOException { + public RpcClient(ConfigurationSource conf, String omServiceId) + throws IOException { Preconditions.checkNotNull(conf); - this.conf = new OzoneConfiguration(conf); + this.conf = conf; this.ugi = UserGroupInformation.getCurrentUser(); // Get default acl rights for user and group. OzoneAclConfig aclConfig = this.conf.getObject(OzoneAclConfig.class); diff --git a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java index b6123830b98..8f8659d6e9d 100644 --- a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java +++ b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestHddsClientUtils.java @@ -18,23 +18,18 @@ package org.apache.hadoop.ozone.client; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.List; + import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; -import org.junit.rules.Timeout; - -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.List; +import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT; @@ -43,6 +38,10 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertThat; import static org.junit.Assert.fail; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.Timeout; /** * This test class verifies the parsing of SCM endpoint config settings. 
The @@ -61,7 +60,7 @@ public class TestHddsClientUtils { */ @Test public void testMissingScmClientAddress() { - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); thrown.expect(IllegalArgumentException.class); HddsUtils.getScmAddressForClients(conf); } @@ -72,7 +71,7 @@ public void testMissingScmClientAddress() { */ @Test public void testGetScmClientAddress() { - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); // First try a client address with just a host name. Verify it falls // back to the default port. @@ -91,7 +90,7 @@ public void testGetScmClientAddress() { @Test public void testgetOmSocketAddress() { - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); // First try a client address with just a host name. Verify it falls // back to the default port. @@ -119,7 +118,7 @@ public void testBlockClientFallbackToClientNoPort() { // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY is undefined it should // fallback to OZONE_SCM_CLIENT_ADDRESS_KEY. final String scmHost = "host123"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost); final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients( conf); @@ -136,7 +135,7 @@ public void testBlockClientFallbackToClientWithPort() { // Verify that the OZONE_SCM_CLIENT_ADDRESS_KEY port number is ignored, // if present. Instead we should use OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT. final String scmHost = "host123:100"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost); final InetSocketAddress address =HddsUtils.getScmAddressForBlockClients( conf); @@ -149,7 +148,7 @@ public void testBlockClientFallbackToScmNamesNoPort() { // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY // are undefined it should fallback to OZONE_SCM_NAMES. final String scmHost = "host456"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients( conf); @@ -166,7 +165,7 @@ public void testBlockClientFallbackToScmNamesWithPort() { // Verify that the OZONE_SCM_NAMES port number is ignored, if present. // Instead we should use OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT. final String scmHost = "host456:200"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); final InetSocketAddress address = HddsUtils.getScmAddressForBlockClients( conf); @@ -179,7 +178,7 @@ public void testClientFallbackToScmNamesNoPort() { // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, it should fallback // to OZONE_SCM_NAMES. final String scmHost = "host456"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf); assertEquals(scmHost, address.getHostName()); @@ -195,7 +194,7 @@ public void testClientFallbackToScmNamesWithPort() { // Verify that the OZONE_SCM_NAMES port number is ignored, if present. // Instead we should use OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT. 
final String scmHost = "host456:300"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); final InetSocketAddress address = HddsUtils.getScmAddressForClients(conf); assertEquals(scmHost.split(":")[0], address.getHostName()); @@ -207,7 +206,7 @@ public void testBlockClientFailsWithMultipleScmNames() { // When OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY and OZONE_SCM_CLIENT_ADDRESS_KEY // are undefined, fail if OZONE_SCM_NAMES has multiple SCMs. final String scmHost = "host123,host456"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); thrown.expect(IllegalArgumentException.class); HddsUtils.getScmAddressForBlockClients(conf); @@ -218,7 +217,7 @@ public void testClientFailsWithMultipleScmNames() { // When OZONE_SCM_CLIENT_ADDRESS_KEY is undefined, fail if OZONE_SCM_NAMES // has multiple SCMs. final String scmHost = "host123,host456"; - final Configuration conf = new OzoneConfiguration(); + final OzoneConfiguration conf = new OzoneConfiguration(); conf.set(OZONE_SCM_NAMES, scmHost); thrown.expect(IllegalArgumentException.class); HddsUtils.getScmAddressForClients(conf); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 249b17a3e99..8f12b6ea46e 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -34,6 +34,7 @@ import java.util.OptionalInt; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.net.NetUtils; @@ -78,18 +79,18 @@ private OmUtils() { * @param conf * @return Target InetSocketAddress for the SCM service endpoint. */ - public static InetSocketAddress getOmAddress(Configuration conf) { + public static InetSocketAddress getOmAddress(ConfigurationSource conf) { return NetUtils.createSocketAddr(getOmRpcAddress(conf)); } /** * Return list of OM addresses by service ids - when HA is enabled. * - * @param conf {@link Configuration} + * @param conf {@link ConfigurationSource} * @return {service.id -> [{@link InetSocketAddress}]} */ public static Map> getOmHAAddressesById( - Configuration conf) { + ConfigurationSource conf) { Map> result = new HashMap<>(); for (String serviceId : conf.getTrimmedStringCollection( OZONE_OM_SERVICE_IDS_KEY)) { @@ -115,7 +116,7 @@ public static Map> getOmHAAddressesById( * @param conf * @return Target InetSocketAddress for the SCM service endpoint. */ - public static String getOmRpcAddress(Configuration conf) { + public static String getOmRpcAddress(ConfigurationSource conf) { final Optional host = getHostNameFromConfigKeys(conf, OZONE_OM_ADDRESS_KEY); @@ -130,7 +131,8 @@ public static String getOmRpcAddress(Configuration conf) { * @param confKey configuration key to lookup address from * @return Target InetSocketAddress for the OM RPC server. 
*/ - public static String getOmRpcAddress(Configuration conf, String confKey) { + public static String getOmRpcAddress(ConfigurationSource conf, + String confKey) { final Optional host = getHostNameFromConfigKeys(conf, confKey); if (host.isPresent()) { @@ -148,7 +150,7 @@ public static String getOmRpcAddress(Configuration conf, String confKey) { * @return Target InetSocketAddress for the OM service endpoint. */ public static InetSocketAddress getOmAddressForClients( - Configuration conf) { + ConfigurationSource conf) { final Optional host = getHostNameFromConfigKeys(conf, OZONE_OM_ADDRESS_KEY); @@ -169,7 +171,7 @@ public static InetSocketAddress getOmAddressForClients( * @return true if OZONE_OM_SERVICE_IDS_KEY is defined and not empty; * else false. */ - public static boolean isServiceIdsDefined(Configuration conf) { + public static boolean isServiceIdsDefined(ConfigurationSource conf) { String val = conf.get(OZONE_OM_SERVICE_IDS_KEY); return val != null && val.length() > 0; } @@ -180,13 +182,14 @@ public static boolean isServiceIdsDefined(Configuration conf) { * @param serviceId OM HA cluster service ID * @return true if HA is configured in the configuration; else false. */ - public static boolean isOmHAServiceId(Configuration conf, String serviceId) { + public static boolean isOmHAServiceId(ConfigurationSource conf, + String serviceId) { Collection omServiceIds = conf.getTrimmedStringCollection( OZONE_OM_SERVICE_IDS_KEY); return omServiceIds.contains(serviceId); } - public static int getOmRpcPort(Configuration conf) { + public static int getOmRpcPort(ConfigurationSource conf) { return getPortNumberFromConfigKeys(conf, OZONE_OM_ADDRESS_KEY) .orElse(OZONE_OM_PORT_DEFAULT); } @@ -198,12 +201,12 @@ public static int getOmRpcPort(Configuration conf) { * @param confKey configuration key to lookup address from * @return Port on which OM RPC server will listen on */ - public static int getOmRpcPort(Configuration conf, String confKey) { + public static int getOmRpcPort(ConfigurationSource conf, String confKey) { return getPortNumberFromConfigKeys(conf, confKey) .orElse(OZONE_OM_PORT_DEFAULT); } - public static int getOmRestPort(Configuration conf) { + public static int getOmRestPort(ConfigurationSource conf) { return getPortNumberFromConfigKeys(conf, OZONE_OM_HTTP_ADDRESS_KEY) .orElse(OZONE_OM_HTTP_BIND_PORT_DEFAULT); } @@ -334,7 +337,7 @@ public static boolean isAddressLocal(InetSocketAddress addr) { /** * Get a collection of all omNodeIds for the given omServiceId. */ - public static Collection getOMNodeIds(Configuration conf, + public static Collection getOMNodeIds(ConfigurationSource conf, String omServiceId) { String key = addSuffix(OZONE_OM_NODES_KEY, omServiceId); return conf.getTrimmedStringCollection(key); @@ -361,7 +364,7 @@ public static Collection emptyAsSingletonNull(Collection * @return if the value is set for key suffixed with OM Node ID, return the * value, else return null. 
*/ - public static String getConfSuffixedWithOMNodeId(Configuration conf, + public static String getConfSuffixedWithOMNodeId(ConfigurationSource conf, String confKey, String omServiceID, String omNodeId) { String suffixedConfKey = OmUtils.addKeySuffixes( confKey, omServiceID, omNodeId); @@ -379,7 +382,7 @@ public static String getConfSuffixedWithOMNodeId(Configuration conf, * @param omNodeHostAddr peer OM node host address * @return http address of peer OM node in the format : */ - public static String getHttpAddressForOMPeerNode(Configuration conf, + public static String getHttpAddressForOMPeerNode(ConfigurationSource conf, String omServiceId, String omNodeId, String omNodeHostAddr) { final Optional bindHost = getHostNameFromConfigKeys(conf, addKeySuffixes(OZONE_OM_HTTP_BIND_HOST_KEY, omServiceId, omNodeId)); @@ -402,7 +405,7 @@ public static String getHttpAddressForOMPeerNode(Configuration conf, * @param omNodeHostAddr peer OM node host address * @return https address of peer OM node in the format : */ - public static String getHttpsAddressForOMPeerNode(Configuration conf, + public static String getHttpsAddressForOMPeerNode(ConfigurationSource conf, String omServiceId, String omNodeId, String omNodeHostAddr) { final Optional bindHost = getHostNameFromConfigKeys(conf, addKeySuffixes(OZONE_OM_HTTPS_BIND_HOST_KEY, omServiceId, omNodeId)); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java index 9bb06eadc72..e14fe3fa5b7 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java @@ -25,12 +25,13 @@ import java.util.Collection; import java.util.HashMap; import java.util.Map; + import org.apache.hadoop.HadoopIllegalArgumentException; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; -import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.StringUtils; @@ -175,11 +176,12 @@ int doWorkInternal(OzoneGetConf tool, String[] args) throws Exception { private final PrintStream out; // Stream for printing command output private final PrintStream err; // Stream for printing error - protected OzoneGetConf(Configuration conf) { + protected OzoneGetConf(OzoneConfiguration conf) { this(conf, System.out, System.err); } - protected OzoneGetConf(Configuration conf, PrintStream out, PrintStream err) { + protected OzoneGetConf(OzoneConfiguration conf, PrintStream out, + PrintStream err) { super(conf); this.out = out; this.err = err; @@ -234,7 +236,7 @@ static class StorageContainerManagersCommandHandler extends CommandHandler { public int doWorkInternal(OzoneGetConf tool, String[] args) throws IOException { Collection addresses = HddsUtils - .getSCMAddresses(tool.getConf()); + .getSCMAddresses(OzoneConfiguration.of(tool.getConf())); for (InetSocketAddress addr : addresses) { tool.printOut(addr.getHostName()); @@ -250,11 +252,15 @@ static class OzoneManagersCommandHandler extends CommandHandler { @Override public int doWorkInternal(OzoneGetConf tool, String[] args) throws IOException { - if (OmUtils.isServiceIdsDefined(tool.getConf())) { - 
tool.printOut(OmUtils.getOmHAAddressesById(tool.getConf()).toString()); + ConfigurationSource configSource = + OzoneConfiguration.of(tool.getConf()); + if (OmUtils.isServiceIdsDefined( + configSource)) { + tool.printOut(OmUtils.getOmHAAddressesById(configSource).toString()); } else { - tool.printOut(OmUtils.getOmAddress(tool.getConf()).getHostName()); + tool.printOut(OmUtils.getOmAddress(configSource).getHostName()); } + return 0; } } @@ -264,7 +270,7 @@ public static void main(String[] args) throws Exception { System.exit(0); } - Configuration conf = new Configuration(); + OzoneConfiguration conf = new OzoneConfiguration(); conf.addResource(new OzoneConfiguration()); int res = ToolRunner.run(new OzoneGetConf(conf), args); System.exit(res); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java index db24c8f0c43..f2ddc7d7ffe 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/ha/OMFailoverProxyProvider.java @@ -18,9 +18,22 @@ package org.apache.hadoop.ozone.om.ha; -import com.google.common.annotations.VisibleForTesting; +import java.io.Closeable; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.FailoverProxyProvider; import org.apache.hadoop.io.retry.RetryInvocationHandler; @@ -30,25 +43,14 @@ import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; +import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; import org.apache.hadoop.security.UserGroupInformation; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.Closeable; -import java.io.IOException; -import java.net.InetSocketAddress; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import java.util.Set; +import com.google.common.annotations.VisibleForTesting; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * A failover proxy provider implementation which allows clients to configure @@ -69,13 +71,14 @@ public class OMFailoverProxyProvider implements private String currentProxyOMNodeId; private int currentProxyIndex; - private final Configuration conf; + private final ConfigurationSource conf; private final long omVersion; private final UserGroupInformation ugi; private final Text delegationTokenService; private final String omServiceId; + // OMFailoverProxyProvider, on encountering certain exception, tries each OM // once in a round robin fashion. 
After that it waits for configured time // before attempting to contact all the OMs again. For other exceptions @@ -86,7 +89,7 @@ public class OMFailoverProxyProvider implements private int numAttemptsOnSameOM = 0; private final long waitBetweenRetries; - public OMFailoverProxyProvider(OzoneConfiguration configuration, + public OMFailoverProxyProvider(ConfigurationSource configuration, UserGroupInformation ugi, String omServiceId) throws IOException { this.conf = configuration; this.omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class); @@ -108,7 +111,7 @@ public OMFailoverProxyProvider(OzoneConfiguration configuration, this(configuration, ugi, null); } - private void loadOMClientConfigs(Configuration config, String omSvcId) + private void loadOMClientConfigs(ConfigurationSource config, String omSvcId) throws IOException { this.omProxies = new HashMap<>(); this.omProxyInfos = new HashMap<>(); @@ -165,11 +168,14 @@ public synchronized String getCurrentProxyOMNodeId() { private OzoneManagerProtocolPB createOMProxy(InetSocketAddress omAddress) throws IOException { - RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class, + Configuration hadoopConf = + LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); + RPC.setProtocolEngine(hadoopConf, OzoneManagerProtocolPB.class, ProtobufRpcEngine.class); return RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress, ugi, - conf, NetUtils.getDefaultSocketFactory(conf), - (int) OmUtils.getOMClientRpcTimeOut(conf)); + hadoopConf, NetUtils.getDefaultSocketFactory(hadoopConf), + (int) OmUtils.getOMClientRpcTimeOut(hadoopConf)); + } /** diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java index 31f09244623..41129410a59 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/lock/OzoneManagerLock.java @@ -26,7 +26,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.ozone.lock.LockManager; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_MANAGER_FAIR_LOCK_DEFAULT; @@ -91,7 +91,7 @@ public class OzoneManagerLock { * Creates new OzoneManagerLock instance. 
* @param conf Configuration object */ - public OzoneManagerLock(Configuration conf) { + public OzoneManagerLock(ConfigurationSource conf) { boolean fair = conf.getBoolean(OZONE_MANAGER_FAIR_LOCK, OZONE_MANAGER_FAIR_LOCK_DEFAULT); manager = new LockManager<>(conf, fair); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java index bcc676c4f87..a95d45ef74a 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/protocolPB/OzoneManagerProtocolClientSideTranslatorPB.java @@ -24,7 +24,7 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.io.Text; @@ -201,7 +201,7 @@ public OzoneManagerProtocolClientSideTranslatorPB( * one {@link OzoneManagerProtocolPB} proxy pointing to each OM node in the * cluster. */ - public OzoneManagerProtocolClientSideTranslatorPB(OzoneConfiguration conf, + public OzoneManagerProtocolClientSideTranslatorPB(ConfigurationSource conf, String clientId, String omServiceId, UserGroupInformation ugi) throws IOException { this.omFailoverProxyProvider = new OMFailoverProxyProvider(conf, ugi, diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java index ae295e34f55..1cdea8b0e31 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/web/utils/OzoneUtils.java @@ -29,7 +29,7 @@ import java.util.concurrent.TimeUnit; import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.client.HddsClientUtils; import org.apache.hadoop.ozone.OzoneConsts; @@ -148,7 +148,8 @@ public static void verifyResourceName(String resName) * Return the TimeDuration configured for the given key. If not configured, * return the default value. */ - public static TimeDuration getTimeDuration(Configuration conf, String key, + public static TimeDuration getTimeDuration(ConfigurationSource conf, + String key, TimeDuration defaultValue) { TimeUnit defaultTimeUnit = defaultValue.getUnit(); long timeDurationInDefaultUnit = conf.getTimeDuration(key, @@ -159,7 +160,7 @@ public static TimeDuration getTimeDuration(Configuration conf, String key, /** * Return the time configured for the given key in milliseconds. 
*/ - public static long getTimeDurationInMS(Configuration conf, String key, + public static long getTimeDurationInMS(ConfigurationSource conf, String key, TimeDuration defaultValue) { return getTimeDuration(conf, key, defaultValue) .toLong(TimeUnit.MILLISECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java index dd5431584cc..19ff428f07c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java @@ -18,14 +18,15 @@ package org.apache.hadoop.fs.ozone.contract; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractCreateTest; import org.apache.hadoop.fs.contract.AbstractFSContract; + import org.junit.AfterClass; import org.junit.BeforeClass; -import java.io.IOException; - /** * Ozone contract tests creating files. */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java index f0a3d8d83eb..33e6260a248 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java @@ -18,14 +18,15 @@ package org.apache.hadoop.fs.ozone.contract; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractDeleteTest; import org.apache.hadoop.fs.contract.AbstractFSContract; + import org.junit.AfterClass; import org.junit.BeforeClass; -import java.io.IOException; - /** * Ozone contract tests covering deletes. */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java index 134a9adf316..ce63456f29d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java @@ -18,13 +18,14 @@ package org.apache.hadoop.fs.ozone.contract; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.tools.contract.AbstractContractDistCpTest; + import org.junit.AfterClass; import org.junit.BeforeClass; -import java.io.IOException; - /** * Contract test suite covering S3A integration with DistCp. 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java index 362b22f2831..9d9aa565d43 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java @@ -18,16 +18,17 @@ package org.apache.hadoop.fs.ozone.contract; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractGetFileStatusTest; import org.apache.hadoop.fs.contract.AbstractFSContract; + import org.junit.AfterClass; import org.junit.BeforeClass; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.IOException; - /** * Ozone contract tests covering getFileStatus. */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java index bc0de5dfb79..305164c7e60 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java @@ -18,14 +18,15 @@ package org.apache.hadoop.fs.ozone.contract; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractMkdirTest; import org.apache.hadoop.fs.contract.AbstractFSContract; + import org.junit.AfterClass; import org.junit.BeforeClass; -import java.io.IOException; - /** * Test dir operations on Ozone. */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java index 0bc57d49a9c..aa81965bb85 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java @@ -18,14 +18,15 @@ package org.apache.hadoop.fs.ozone.contract; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractOpenTest; import org.apache.hadoop.fs.contract.AbstractFSContract; + import org.junit.AfterClass; import org.junit.BeforeClass; -import java.io.IOException; - /** * Ozone contract tests opening files. 
*/ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java index 8ce1d1b618d..3660d81b93e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java @@ -18,14 +18,15 @@ package org.apache.hadoop.fs.ozone.contract; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractRenameTest; import org.apache.hadoop.fs.contract.AbstractFSContract; + import org.junit.AfterClass; import org.junit.BeforeClass; -import java.io.IOException; - /** * Ozone contract tests covering rename. */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java index 3156eb2f888..c64dafae2e1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java @@ -18,14 +18,14 @@ package org.apache.hadoop.fs.ozone.contract; -import org.junit.AfterClass; -import org.junit.BeforeClass; +import java.io.IOException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractRootDirectoryTest; import org.apache.hadoop.fs.contract.AbstractFSContract; -import java.io.IOException; +import org.junit.AfterClass; +import org.junit.BeforeClass; /** * Ozone contract test for ROOT directory operations. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java index c4bc0ff119a..2f2202547a5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java @@ -18,14 +18,15 @@ package org.apache.hadoop.fs.ozone.contract; +import java.io.IOException; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.contract.AbstractContractSeekTest; import org.apache.hadoop.fs.contract.AbstractFSContract; + import org.junit.AfterClass; import org.junit.BeforeClass; -import java.io.IOException; - /** * Ozone contract tests covering file seek. 
*/ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index 4b6cf8cf574..d1d51624c0a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -17,28 +17,26 @@ */ package org.apache.hadoop.ozone; -import org.apache.hadoop.conf.Configuration; +import java.io.IOException; +import java.util.List; +import java.util.Optional; +import java.util.OptionalInt; +import java.util.UUID; +import java.util.concurrent.TimeoutException; + import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.OzoneManager; -import org.apache.hadoop.hdds.scm.protocolPB - .StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.recon.ReconServer; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.test.GenericTestUtils; -import java.io.IOException; -import java.util.List; -import java.util.Optional; -import java.util.OptionalInt; -import java.util.UUID; -import java.util.concurrent.TimeoutException; - /** * Interface used for MiniOzoneClusters. 
*/ @@ -71,7 +69,7 @@ static Builder newHABuilder(OzoneConfiguration conf) { * * @return Configuration */ - Configuration getConf(); + OzoneConfiguration getConf(); /** * Waits for the cluster to be ready, this call blocks till all the diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java index b83a2c9875b..535ca91b490 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java @@ -23,7 +23,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.ratis.RatisHelper; @@ -97,7 +96,7 @@ static OzoneConfiguration newOzoneConfiguration(RpcType rpc) { return conf; } - static void initRatisConf(RpcType rpc, Configuration conf) { + static void initRatisConf(RpcType rpc, OzoneConfiguration conf) { conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY, rpc.name()); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index 386a72767ce..8f8fe27027d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -18,15 +18,6 @@ package org.apache.hadoop.ozone; -import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; -import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; -import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - import java.io.File; import java.io.FileOutputStream; import java.io.FileReader; @@ -35,9 +26,6 @@ import java.util.HashSet; import java.util.List; -import org.apache.commons.io.FileUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.DFSConfigKeysLegacy; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -54,9 +42,20 @@ import org.apache.hadoop.ozone.container.ozoneimpl.TestOzoneContainer; import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.TestGenericTestUtils; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang3.RandomUtils; +import static org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; +import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; +import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT; import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotEquals; 
+import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import org.junit.BeforeClass; import org.junit.Test; import org.yaml.snakeyaml.Yaml; @@ -190,7 +189,7 @@ public void testDatanodeIDPersistent() throws Exception { @Test public void testContainerRandomPort() throws IOException { - Configuration ozoneConf = SCMTestUtils.getConf(); + OzoneConfiguration ozoneConf = SCMTestUtils.getConf(); File testDir = PathUtils.getTestDir(TestOzoneContainer.class); ozoneConf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java index 9beddd4a71b..e36bf73bdf5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java @@ -21,9 +21,11 @@ import java.util.Map; import java.util.Set; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.utils.MetadataKeyFilters; +import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; @@ -32,8 +34,6 @@ import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import com.google.common.collect.Lists; import com.google.common.collect.Maps; @@ -48,10 +48,10 @@ public class TestStorageContainerManagerHelper { private final MiniOzoneCluster cluster; - private final Configuration conf; + private final OzoneConfiguration conf; public TestStorageContainerManagerHelper(MiniOzoneCluster cluster, - Configuration conf) throws IOException { + OzoneConfiguration conf) throws IOException { this.cluster = cluster; this.conf = conf; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java index d05093f2893..7664d757aa1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/CertificateClientTestImpl.java @@ -16,17 +16,6 @@ */ package org.apache.hadoop.ozone.client; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.SecurityConfig; -import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; -import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate; -import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; -import 
org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.bouncycastle.cert.X509CertificateHolder; -import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; - import java.io.InputStream; import java.security.KeyPair; import java.security.PrivateKey; @@ -37,6 +26,17 @@ import java.time.temporal.ChronoUnit; import java.util.List; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.security.x509.SecurityConfig; +import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; +import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest; +import org.apache.hadoop.hdds.security.x509.certificates.utils.SelfSignedCertificate; +import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException; +import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; + +import org.bouncycastle.cert.X509CertificateHolder; +import org.bouncycastle.cert.jcajce.JcaX509CertificateConverter; + /** * Test implementation for CertificateClient. To be used only for test * purposes. @@ -46,7 +46,7 @@ public class CertificateClientTestImpl implements CertificateClient { private final SecurityConfig securityConfig; private final KeyPair keyPair; - private final Configuration config; + private final OzoneConfiguration config; private final X509Certificate x509Certificate; public CertificateClientTestImpl(OzoneConfiguration conf) throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java index 3f4b19a56d4..324db98105f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneAtRestEncryption.java @@ -17,7 +17,14 @@ */ package org.apache.hadoop.ozone.client.rpc; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.io.IOException; +import java.security.NoSuchAlgorithmException; +import java.time.Instant; +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; + import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.kms.KMSClientProvider; import org.apache.hadoop.crypto.key.kms.server.MiniKMS; @@ -49,22 +56,14 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.test.GenericTestUtils; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Ignore; import org.junit.Test; -import java.io.File; -import java.io.IOException; -import java.security.NoSuchAlgorithmException; -import java.time.Instant; -import java.util.HashMap; -import java.util.Map; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; - /** * This class is to test all the public facing APIs of Ozone Client. 
*/ @@ -312,7 +311,7 @@ private static String getKeyProviderURI(MiniKMS kms) { } private static void createKey(String keyName, KeyProvider - provider, Configuration config) + provider, OzoneConfiguration config) throws NoSuchAlgorithmException, IOException { final KeyProvider.Options options = KeyProvider.options(config); options.setDescription(keyName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index e993c5b5b58..750164574a1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -34,8 +34,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.client.OzoneQuota; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; @@ -102,7 +100,7 @@ import org.apache.commons.io.FileUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.RandomUtils; - +import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; @@ -112,9 +110,7 @@ import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.READ; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.either; - import org.junit.Assert; - import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotEquals; @@ -122,7 +118,6 @@ import static org.junit.Assert.assertThat; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; - import org.junit.Ignore; import org.junit.Test; import org.slf4j.Logger; @@ -956,7 +951,7 @@ private void createAndCorruptKey(String volumeName, String bucketName, private void readCorruptedKey(String volumeName, String bucketName, String keyName, boolean verifyChecksum) { try { - Configuration configuration = cluster.getConf(); + OzoneConfiguration configuration = cluster.getConf(); configuration.setBoolean(OzoneConfigKeys.OZONE_CLIENT_VERIFY_CHECKSUM, verifyChecksum); RpcClient client = new RpcClient(configuration, null); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index cd3882bac6c..61e53ebf5b1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -17,21 +17,21 @@ */ package org.apache.hadoop.ozone.dn.ratis; -import org.apache.hadoop.conf.Configuration; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.PrintStream; +import java.util.UUID; + import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.segmentparser.DatanodeRatisLogParser; import 
org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.segmentparser.DatanodeRatisLogParser; + import org.junit.After; import org.junit.Assert; import org.junit.Before; import org.junit.Test; -import java.io.ByteArrayOutputStream; -import java.io.File; -import java.io.PrintStream; -import java.util.UUID; - /** * Test Datanode Ratis log parser. */ @@ -64,7 +64,7 @@ public void destroy() throws Exception { @Test public void testRatisLogParsing() { cluster.stop(); - Configuration conf = cluster.getHddsDatanodes().get(0).getConf(); + OzoneConfiguration conf = cluster.getHddsDatanodes().get(0).getConf(); String path = conf.get(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR); UUID pid = cluster.getStorageContainerManager().getPipelineManager() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java index 27ad7d817b7..1f483345eb2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java @@ -26,7 +26,7 @@ import com.google.protobuf.ServiceException; import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; @@ -80,7 +80,7 @@ public class KeyDeletingService extends BackgroundService { KeyDeletingService(OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient, KeyManager manager, long serviceInterval, - long serviceTimeout, Configuration conf) { + long serviceTimeout, ConfigurationSource conf) { super("KeyDeletingService", serviceInterval, TimeUnit.MILLISECONDS, KEY_DELETING_CORE_POOL_SIZE, serviceTimeout); this.ozoneManager = ozoneManager; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java index 12a8017d039..c527af9f71c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMStorage.java @@ -22,7 +22,7 @@ import java.util.Properties; import java.util.UUID; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType; import org.apache.hadoop.hdds.server.ServerUtils; @@ -114,7 +114,7 @@ protected Properties getNodeProperties() { * @param conf - Config * @return File path, after creating all the required Directories. 
*/ - public static File getOmDbDir(Configuration conf) { + public static File getOmDbDir(ConfigurationSource conf) { return ServerUtils.getDBPath(conf, OMConfigKeys.OZONE_OM_DB_DIRS); } } \ No newline at end of file diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java index 38dc1ad19c3..79f50eaab5c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerHttpServer.java @@ -17,12 +17,12 @@ package org.apache.hadoop.ozone.om; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdds.server.http.BaseHttpServer; - import java.io.IOException; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.server.http.BaseHttpServer; +import org.apache.hadoop.ozone.OzoneConsts; + import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT; @@ -31,7 +31,7 @@ */ public class OzoneManagerHttpServer extends BaseHttpServer { - public OzoneManagerHttpServer(Configuration conf, OzoneManager om) + public OzoneManagerHttpServer(ConfigurationSource conf, OzoneManager om) throws IOException { super(conf, "ozoneManager"); addServlet("serviceList", OZONE_OM_SERVICE_LIST_HTTP_ENDPOINT, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index b0eb061a82a..24ce9db8871 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om.ratis; -import com.google.common.annotations.VisibleForTesting; import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; @@ -35,30 +34,30 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; -import com.google.common.base.Strings; -import com.google.protobuf.InvalidProtocolBufferException; -import com.google.protobuf.ServiceException; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException; import org.apache.hadoop.ozone.om.ha.OMNodeDetails; -import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; +import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Strings; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.ServiceException; import org.apache.ratis.RaftConfigKeys; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.grpc.GrpcConfigKeys; import org.apache.ratis.netty.NettyConfigKeys; -import org.apache.ratis.proto.RaftProtos.RoleInfoProto; import org.apache.ratis.proto.RaftProtos.RaftPeerRole; +import org.apache.ratis.proto.RaftProtos.RoleInfoProto; import org.apache.ratis.protocol.ClientId; import org.apache.ratis.protocol.GroupInfoReply; import org.apache.ratis.protocol.GroupInfoRequest; @@ -245,7 +244,7 @@ private OzoneManagerProtocolProtos.Status exceptionToResponseStatus( * @param raftPeers peer nodes in the raft ring * @throws IOException */ - private OzoneManagerRatisServer(Configuration conf, + private OzoneManagerRatisServer(ConfigurationSource conf, OzoneManager om, String raftGroupIdStr, RaftPeerId localRaftPeerId, InetSocketAddress addr, List raftPeers) @@ -295,7 +294,7 @@ public void run() { * Creates an instance of OzoneManagerRatisServer. */ public static OzoneManagerRatisServer newOMRatisServer( - Configuration ozoneConf, OzoneManager omProtocol, + ConfigurationSource ozoneConf, OzoneManager omProtocol, OMNodeDetails omNodeDetails, List peerNodes) throws IOException { @@ -370,7 +369,7 @@ public void stop() { //TODO simplify it to make it shorter @SuppressWarnings("methodlength") - private RaftProperties newRaftProperties(Configuration conf) { + private RaftProperties newRaftProperties(ConfigurationSource conf) { final RaftProperties properties = new RaftProperties(); // Set RPC type @@ -657,7 +656,7 @@ private UUID getRaftGroupIdFromOmServiceId(String omServiceId) { /** * Get the local directory where ratis logs will be stored. 
*/ - public static String getOMRatisDirectory(Configuration conf) { + public static String getOMRatisDirectory(ConfigurationSource conf) { String storageDir = conf.get(OMConfigKeys.OZONE_OM_RATIS_STORAGE_DIR); if (Strings.isNullOrEmpty(storageDir)) { @@ -666,7 +665,7 @@ public static String getOMRatisDirectory(Configuration conf) { return storageDir; } - public static String getOMRatisSnapshotDirectory(Configuration conf) { + public static String getOMRatisSnapshotDirectory(ConfigurationSource conf) { String snapshotDir = conf.get(OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_DIR); if (Strings.isNullOrEmpty(snapshotDir)) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java index 642bbcdf4c0..9fc1c4611df 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/OzoneManagerSnapshotProvider.java @@ -28,8 +28,8 @@ import java.util.Map; import java.util.concurrent.TimeUnit; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.server.http.HttpConfig; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.RocksDBCheckpoint; @@ -75,7 +75,7 @@ public class OzoneManagerSnapshotProvider { private static final String OM_SNAPSHOT_DB = "om.snapshot.db"; - public OzoneManagerSnapshotProvider(Configuration conf, + public OzoneManagerSnapshotProvider(ConfigurationSource conf, File omRatisSnapshotDir, List peerNodes) { LOG.info("Initializing OM Snapshot Provider"); @@ -90,7 +90,7 @@ public OzoneManagerSnapshotProvider(Configuration conf, this.httpRequestConfig = getHttpRequestConfig(conf); } - private RequestConfig getHttpRequestConfig(Configuration conf) { + private RequestConfig getHttpRequestConfig(ConfigurationSource conf) { TimeUnit socketTimeoutUnit = OZONE_OM_SNAPSHOT_PROVIDER_SOCKET_TIMEOUT_DEFAULT.getUnit(); int socketTimeoutMS = (int) conf.getTimeDuration( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java index d752ec1d379..48873cbe85c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMStorage.java @@ -18,13 +18,14 @@ import java.io.File; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.test.GenericTestUtils; import org.apache.commons.io.FileUtils; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; @@ -45,7 +46,7 @@ public void testGetOmDbDir() { final File testDir = createTestDir(); final File dbDir = new File(testDir, "omDbDir"); final File metaDir = new File(testDir, "metaDir"); // should be ignored. 
- final Configuration conf = new OzoneConfiguration(); + final ConfigurationSource conf = new OzoneConfiguration(); conf.set(OMConfigKeys.OZONE_OM_DB_DIRS, dbDir.getPath()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); @@ -65,7 +66,7 @@ public void testGetOmDbDir() { public void testGetOmDbDirWithFallback() { final File testDir = createTestDir(); final File metaDir = new File(testDir, "metaDir"); - final Configuration conf = new OzoneConfiguration(); + final ConfigurationSource conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath()); try { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java index f35d3d39d7d..c7a8f207bf7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHttpServer.java @@ -18,8 +18,15 @@ package org.apache.hadoop.ozone.om; -import org.apache.hadoop.conf.Configuration; +import java.io.File; +import java.net.InetSocketAddress; +import java.net.URL; +import java.net.URLConnection; +import java.util.Arrays; +import java.util.Collection; + import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.http.HttpConfig.Policy; @@ -27,6 +34,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.test.GenericTestUtils; + import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -35,13 +43,6 @@ import org.junit.runners.Parameterized; import org.junit.runners.Parameterized.Parameters; -import java.io.File; -import java.net.InetSocketAddress; -import java.net.URL; -import java.net.URLConnection; -import java.util.Arrays; -import java.util.Collection; - /** * Test http server of OM with various HTTP option. 
*/ @@ -51,7 +52,7 @@ public class TestOzoneManagerHttpServer { .getTempPath(TestOzoneManagerHttpServer.class.getSimpleName()); private static String keystoresDir; private static String sslConfDir; - private static Configuration conf; + private static OzoneConfiguration conf; private static URLConnectionFactory connectionFactory; @Parameters public static Collection policy() { @@ -73,7 +74,7 @@ public TestOzoneManagerHttpServer(Policy policy) { File base = new File(BASEDIR); FileUtil.fullyDelete(base); base.mkdirs(); - conf = new Configuration(); + conf = new OzoneConfiguration(); keystoresDir = new File(BASEDIR).getAbsolutePath(); sslConfDir = KeyStoreTestUtil.getClasspathDir( TestOzoneManagerHttpServer.class); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java index 8fbaf4f25e2..518953f91c6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOzoneTokenIdentifier.java @@ -17,6 +17,9 @@ */ package org.apache.hadoop.ozone.security; +import javax.crypto.KeyGenerator; +import javax.crypto.Mac; +import javax.crypto.SecretKey; import java.io.ByteArrayInputStream; import java.io.DataInputStream; import java.io.DataOutputStream; @@ -39,19 +42,19 @@ import java.util.Collections; import java.util.List; import java.util.Map; -import javax.crypto.KeyGenerator; -import javax.crypto.Mac; -import javax.crypto.SecretKey; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.conf.Configuration; + import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.hadoop.security.ssl.TestSSLFactory; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.util.Time; + +import org.apache.commons.lang3.RandomStringUtils; +import org.apache.commons.lang3.RandomUtils; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -88,10 +91,10 @@ public static void setUp() throws Exception { base.mkdirs(); } - private Configuration createConfiguration(boolean clientCert, + private ConfigurationSource createConfiguration(boolean clientCert, boolean trustStore) throws Exception { - Configuration conf = new Configuration(); + OzoneConfiguration conf = new OzoneConfiguration(); KeyStoreTestUtil.setupSSLConfig(KEYSTORES_DIR, sslConfsDir, conf, clientCert, trustStore, EXCLUDE_CIPHERS); sslConfsDir = KeyStoreTestUtil.getClasspathDir(TestSSLFactory.class); diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java index 935826e8315..2f005556e97 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzFs.java @@ -18,16 +18,16 @@ package org.apache.hadoop.fs.ozone; -import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import 
org.apache.hadoop.fs.DelegateToFileSystem; -import org.apache.hadoop.ozone.OzoneConsts; - import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DelegateToFileSystem; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.ozone.OzoneConsts; + /** * ozone implementation of AbstractFileSystem. * This impl delegates to the OzoneFileSystem diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index a93aca50eae..7316d441e88 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -25,7 +25,6 @@ import java.util.Iterator; import java.util.List; -import org.apache.commons.collections.CollectionUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.crypto.key.KeyProvider; @@ -35,6 +34,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.client.ReplicationFactor; import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.security.x509.SecurityConfig; @@ -57,6 +57,7 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenRenewer; +import org.apache.commons.collections.CollectionUtils; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -112,7 +113,7 @@ public BasicOzoneClientAdapterImpl(OzoneConfiguration conf, String volumeStr, } public BasicOzoneClientAdapterImpl(String omHost, int omPort, - Configuration hadoopConf, String volumeStr, String bucketStr) + ConfigurationSource hadoopConf, String volumeStr, String bucketStr) throws IOException { ClassLoader contextClassLoader = @@ -121,7 +122,6 @@ public BasicOzoneClientAdapterImpl(String omHost, int omPort, try { OzoneConfiguration conf = OzoneConfiguration.of(hadoopConf); - if (omHost == null && OmUtils.isServiceIdsDefined(conf)) { // When the host name or service id isn't given // but ozone.om.service.ids is defined, declare failure. 
@@ -404,8 +404,10 @@ public long renew(Token token, Configuration conf) throws IOException, InterruptedException { Token ozoneDt = (Token) token; - OzoneClient ozoneClient = OzoneClientFactory.getOzoneClient(conf, - ozoneDt); + + OzoneClient ozoneClient = + OzoneClientFactory.getOzoneClient(OzoneConfiguration.of(conf), + ozoneDt); return ozoneClient.getObjectStore().renewDelegationToken(ozoneDt); } @@ -414,8 +416,9 @@ public void cancel(Token token, Configuration conf) throws IOException, InterruptedException { Token ozoneDt = (Token) token; - OzoneClient ozoneClient = OzoneClientFactory.getOzoneClient(conf, - ozoneDt); + OzoneClient ozoneClient = + OzoneClientFactory.getOzoneClient(OzoneConfiguration.of(conf), + ozoneDt); ozoneClient.getObjectStore().cancelDelegationToken(ozoneDt); } } diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index dbd5f913ba7..b7323acf938 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -32,8 +32,6 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; -import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.BlockLocation; import org.apache.hadoop.fs.CreateFlag; @@ -46,6 +44,11 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException; import org.apache.hadoop.fs.permission.FsPermission; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -57,7 +60,6 @@ import static org.apache.hadoop.fs.ozone.Constants.OZONE_USER_DIR; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; - import org.apache.http.client.utils.URIBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -155,7 +157,15 @@ public void initialize(URI name, Configuration conf) throws IOException { boolean isolatedClassloader = conf.getBoolean("ozone.fs.isolated-classloader", defaultValue); - this.adapter = createAdapter(conf, bucketStr, volumeStr, omHost, omPort, + ConfigurationSource source; + if (conf instanceof OzoneConfiguration) { + source = (ConfigurationSource) conf; + } else { + source = new LegacyHadoopConfigurationSource(conf); + } + this.adapter = + createAdapter(source, bucketStr, + volumeStr, omHost, omPort, isolatedClassloader); try { @@ -174,7 +184,7 @@ public void initialize(URI name, Configuration conf) throws IOException { } } - protected OzoneClientAdapter createAdapter(Configuration conf, + protected OzoneClientAdapter createAdapter(ConfigurationSource conf, String bucketStr, String volumeStr, String omHost, int omPort, boolean isolatedClassloader) throws IOException { diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java 
b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java index a0ec01f6f67..5ab3a0052a9 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/O3fsDtFetcher.java @@ -21,18 +21,18 @@ import java.io.IOException; import java.net.URI; -import org.apache.hadoop.ozone.OzoneConsts; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.io.Text; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.DtFetcher; import org.apache.hadoop.security.token.Token; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** * A DT fetcher for OzoneFileSystem. diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java index a6dd3a487c6..346b994a3ae 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzFs.java @@ -18,16 +18,16 @@ package org.apache.hadoop.fs.ozone; -import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.DelegateToFileSystem; -import org.apache.hadoop.ozone.OzoneConsts; - import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.DelegateToFileSystem; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; +import org.apache.hadoop.ozone.OzoneConsts; + /** * ozone implementation of AbstractFileSystem. 
* This impl delegates to the OzoneFileSystem diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java index 975bbf7f4ff..d19f570af45 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapterImpl.java @@ -18,7 +18,8 @@ package org.apache.hadoop.fs.ozone; import java.io.IOException; -import org.apache.hadoop.conf.Configuration; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; /** @@ -44,7 +45,7 @@ public OzoneClientAdapterImpl( } public OzoneClientAdapterImpl(String omHost, int omPort, - Configuration hadoopConf, String volumeStr, String bucketStr, + ConfigurationSource hadoopConf, String volumeStr, String bucketStr, OzoneFSStorageStatistics storageStatistics) throws IOException { super(omHost, omPort, hadoopConf, volumeStr, bucketStr); diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index a81de2cdc13..20dd72f6806 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.crypto.key.KeyProvider; import org.apache.hadoop.crypto.key.KeyProviderTokenIssuer; import org.apache.hadoop.fs.FileSystem; @@ -36,9 +36,9 @@ * The Ozone Filesystem implementation. *

* This subclass is marked as private as code should not be creating it - * directly; use {@link FileSystem#get(Configuration)} and variants to create - * one. If cast to {@link OzoneFileSystem}, extra methods and features may be - * accessed. Consider those private and unstable. + * directly; use {@link FileSystem#get(org.apache.hadoop.conf.Configuration)} + * and variants to create one. If cast to {@link OzoneFileSystem}, extra + * methods and features may be accessed. Consider those private and unstable. */ @InterfaceAudience.Private @InterfaceStability.Evolving @@ -85,7 +85,7 @@ protected void incrementCounter(Statistic statistic) { } @Override - protected OzoneClientAdapter createAdapter(Configuration conf, + protected OzoneClientAdapter createAdapter(ConfigurationSource conf, String bucketStr, String volumeStr, String omHost, int omPort, boolean isolatedClassloader) throws IOException { diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java index b36d368a331..3169ecd34fa 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java @@ -17,11 +17,12 @@ */ package org.apache.hadoop.fs.ozone; -import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FsShell; import org.apache.hadoop.fs.shell.CommandFactory; import org.apache.hadoop.fs.shell.FsCommand; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.util.ToolRunner; /** Provide command line access to a Ozone FileSystem. 
*/ @@ -45,7 +46,7 @@ public OzoneFsShell() { * Commands can be executed via {@link #run(String[])} * @param conf the hadoop configuration */ - public OzoneFsShell(Configuration conf) { + public OzoneFsShell(OzoneConfiguration conf) { super(conf); } diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java index 2d5581277c5..d204ad542f1 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithMocks.java @@ -17,18 +17,13 @@ */ package org.apache.hadoop.fs.ozone; -import static org.junit.Assert.assertEquals; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -38,8 +33,14 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.security.UserGroupInformation; + +import static org.junit.Assert.assertEquals; import org.junit.Test; import org.junit.runner.RunWith; +import static org.mockito.Matchers.any; +import static org.mockito.Matchers.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import org.powermock.api.mockito.PowerMockito; import org.powermock.core.classloader.annotations.PowerMockIgnore; import org.powermock.core.classloader.annotations.PrepareForTest; @@ -55,7 +56,7 @@ public class TestOzoneFileSystemWithMocks { @Test public void testFSUriWithHostPortOverrides() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); mockClientFactory(conf, 5899); mockUser(); @@ -71,7 +72,7 @@ public void testFSUriWithHostPortOverrides() throws Exception { @Test public void testFSUriWithHostPortUnspecified() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); final int omPort = OmUtils.getOmRpcPort(conf); mockClientFactory(conf, omPort); mockUser(); @@ -90,7 +91,7 @@ public void testFSUriWithHostPortUnspecified() throws Exception { @Test public void testFSUriHostVersionDefault() throws Exception { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); mockClientFactory(conf); mockUser(); @@ -106,7 +107,7 @@ public void testFSUriHostVersionDefault() throws Exception { @Test public void testReplicationDefaultValue() throws IOException, URISyntaxException { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); int defaultValue = conf.getInt(OzoneConfigKeys.OZONE_REPLICATION, 3); mockClientFactory(conf); mockUser(); @@ -120,7 +121,7 @@ public void testReplicationDefaultValue() @Test public void testReplicationCustomValue() throws IOException, URISyntaxException { - Configuration conf = new OzoneConfiguration(); + OzoneConfiguration conf = new OzoneConfiguration(); short configured = 1; 
conf.setInt(OzoneConfigKeys.OZONE_REPLICATION, configured); mockClientFactory(conf); @@ -144,20 +145,20 @@ private OzoneClient mockClient() throws IOException { return ozoneClient; } - private void mockClientFactory(Configuration conf, int omPort) + private void mockClientFactory(ConfigurationSource conf, int omPort) throws IOException { OzoneClient ozoneClient = mockClient(); PowerMockito.mockStatic(OzoneClientFactory.class); PowerMockito.when(OzoneClientFactory.getRpcClient(eq("local.host"), - eq(omPort), eq(conf))).thenReturn(ozoneClient); + eq(omPort), any())).thenReturn(ozoneClient); } - private void mockClientFactory(Configuration conf) throws IOException { + private void mockClientFactory(ConfigurationSource conf) throws IOException { OzoneClient ozoneClient = mockClient(); PowerMockito.mockStatic(OzoneClientFactory.class); - PowerMockito.when(OzoneClientFactory.getRpcClient(eq(conf))) + PowerMockito.when(OzoneClientFactory.getRpcClient(any())) .thenReturn(ozoneClient); } diff --git a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestReadWriteStatistics.java b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestReadWriteStatistics.java index b8f8507d1bc..3659cdc7528 100644 --- a/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestReadWriteStatistics.java +++ b/hadoop-ozone/ozonefs/src/test/java/org/apache/hadoop/fs/ozone/TestReadWriteStatistics.java @@ -18,10 +18,16 @@ package org.apache.hadoop.fs.ozone; -import static org.mockito.Mockito.*; -import static org.junit.Assert.*; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.ByteBuffer; +import java.nio.ReadOnlyBufferException; +import java.util.Arrays; +import java.util.EnumSet; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FSDataOutputStream; @@ -29,18 +35,23 @@ import org.apache.hadoop.fs.GlobalStorageStatistics; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.StorageStatistics; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; import org.junit.Before; import org.junit.Test; - -import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; -import java.net.URI; -import java.net.URISyntaxException; -import java.nio.ByteBuffer; -import java.nio.ReadOnlyBufferException; -import java.util.Arrays; -import java.util.EnumSet; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyBoolean; +import static org.mockito.Mockito.anyInt; +import static org.mockito.Mockito.anyShort; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.when; /** * Tests to check if bytes read and written and corresponding read and write @@ -404,14 +415,14 @@ private void setupAdapterToReturnFakeOutputStreamOnCreate() throws Exception { } private void setupFileSystemToUseFakeClientAdapter() throws IOException { - doReturn(fakeAdapter).when(fs).createAdapter(any(Configuration.class), + doReturn(fakeAdapter).when(fs).createAdapter(any(ConfigurationSource.class), anyString(), anyString(), 
anyString(), anyInt(), anyBoolean()); } private void initializeFS() throws IOException, URISyntaxException { FileSystem.getGlobalStorageStatistics().reset(); URI fsUri = new URI("o3fs://volume.bucket.localhost"); - Configuration conf = new Configuration(); + OzoneConfiguration conf = new OzoneConfiguration(); fs.initialize(fsUri, conf); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java index 5a6a916b046..97a4de6d2f0 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ConfigurationProvider.java @@ -17,11 +17,12 @@ */ package org.apache.hadoop.ozone.recon; -import com.google.common.annotations.VisibleForTesting; -import com.google.inject.Provider; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import com.google.common.annotations.VisibleForTesting; +import com.google.inject.Provider; + /** * Ozone Configuration Provider. *

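The hunks above all follow one migration pattern: method and constructor parameters typed as org.apache.hadoop.conf.Configuration become ConfigurationSource (or a concrete OzoneConfiguration), and where a raw Hadoop Configuration still arrives from the outside, as in BasicOzoneFileSystem.initialize() earlier in this patch, it is adapted rather than copied key by key. Below is a minimal sketch of that bridging, assuming only the names that already appear in the hunks (LegacyHadoopConfigurationSource, OzoneConfiguration.of); the helper class itself is illustrative and not part of the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;

/** Illustrative helper mirroring the bridging added in BasicOzoneFileSystem. */
public final class ConfBridgeSketch {

  private ConfBridgeSketch() {
  }

  /** Adapt a legacy Hadoop Configuration to the Ozone ConfigurationSource API. */
  static ConfigurationSource toSource(Configuration conf) {
    if (conf instanceof OzoneConfiguration) {
      // OzoneConfiguration already implements ConfigurationSource, so no wrapping is needed.
      return (ConfigurationSource) conf;
    }
    // Wrap the raw Hadoop Configuration instead of copying individual keys.
    return new LegacyHadoopConfigurationSource(conf);
  }

  /** Obtain a concrete OzoneConfiguration where one is required. */
  static OzoneConfiguration toOzoneConf(ConfigurationSource source) {
    return OzoneConfiguration.of(source);
  }
}

Callers that previously handed the Hadoop Configuration straight through would instead pass toSource(conf), or OzoneConfiguration.of(...) when the target API needs the concrete type, as the delegation-token renew/cancel hunks above do for OzoneClientFactory.getOzoneClient.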
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java index 09507388b97..a29a35e7b65 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconUtils.java @@ -18,10 +18,6 @@ package org.apache.hadoop.ozone.recon; -import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig; -import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; - import java.io.BufferedInputStream; import java.io.BufferedOutputStream; import java.io.File; @@ -29,24 +25,27 @@ import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; +import java.net.URL; +import java.net.URLConnection; import java.nio.file.Path; import java.nio.file.Paths; import java.util.zip.GZIPOutputStream; -import java.net.URLConnection; -import java.net.URL; -import org.apache.commons.compress.archivers.tar.TarArchiveEntry; -import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; -import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; -import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.HddsUtils; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.io.IOUtils; +import org.apache.commons.compress.archivers.tar.TarArchiveEntry; +import org.apache.commons.compress.archivers.tar.TarArchiveInputStream; +import org.apache.commons.compress.archivers.tar.TarArchiveOutputStream; +import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream; +import static org.apache.hadoop.hdds.server.ServerUtils.getDirectoryFromConfig; +import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hdfs.web.URLConnectionFactory; /** * Recon Utility class. @@ -61,7 +60,7 @@ public ReconUtils() { private static final Logger LOG = LoggerFactory.getLogger( ReconUtils.class); - public static File getReconScmDbDir(Configuration conf) { + public static File getReconScmDbDir(ConfigurationSource conf) { return new ReconUtils().getReconDbDir(conf, OZONE_RECON_SCM_DB_DIR); } @@ -73,7 +72,7 @@ public static File getReconScmDbDir(Configuration conf) { * @param dirConfigKey key to check * @return Return File based on configured or fallback value. 
*/ - public File getReconDbDir(Configuration conf, String dirConfigKey) { + public File getReconDbDir(ConfigurationSource conf, String dirConfigKey) { File metadataDir = getDirectoryFromConfig(conf, dirConfigKey, "Recon"); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java index 754b525dabd..20afa096a43 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java @@ -18,12 +18,10 @@ package org.apache.hadoop.ozone.recon.scm; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_SCM_CONTAINER_DB; - import java.io.File; import java.io.IOException; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; @@ -35,6 +33,8 @@ import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.persistence.ContainerSchemaManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; + +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_SCM_CONTAINER_DB; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -56,12 +56,12 @@ public class ReconContainerManager extends SCMContainerManager { * CacheSize is specified * in MB. * - * @param conf - {@link Configuration} + * @param conf - {@link ConfigurationSource} * @param pipelineManager - {@link PipelineManager} * @throws IOException on Failure. 
*/ public ReconContainerManager( - Configuration conf, PipelineManager pipelineManager, + ConfigurationSource conf, PipelineManager pipelineManager, StorageContainerServiceProvider scm, ContainerSchemaManager containerSchemaManager) throws IOException { super(conf, pipelineManager); @@ -70,7 +70,7 @@ public ReconContainerManager( } @Override - protected File getContainerDBPath(Configuration conf) { + protected File getContainerDBPath(ConfigurationSource conf) { File metaDir = ReconUtils.getReconScmDbDir(conf); return new File(metaDir, RECON_SCM_CONTAINER_DB); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java index 9a3d5181f15..60e8a0635eb 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java @@ -18,11 +18,6 @@ package org.apache.hadoop.ozone.recon.scm; -import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_SCM_NODE_DB; - import java.io.File; import java.io.IOException; import java.util.HashMap; @@ -31,8 +26,8 @@ import java.util.Set; import java.util.UUID; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.StringUtils; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -48,10 +43,14 @@ import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.util.Time; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import com.google.common.collect.ImmutableSet; +import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_SCM_NODE_DB; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Recon SCM's Node manager that includes persistence. 
@@ -117,7 +116,7 @@ public void addNodeToDB(DatanodeDetails datanodeDetails) throws IOException { LOG.info("Adding new node {} to Node DB.", datanodeDetails.getUuid()); } - protected File getNodeDBPath(Configuration conf) { + protected File getNodeDBPath(ConfigurationSource conf) { File metaDir = ReconUtils.getReconScmDbDir(conf); return new File(metaDir, RECON_SCM_NODE_DB); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java index 2405ad22301..20f77c77b93 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java @@ -18,15 +18,12 @@ package org.apache.hadoop.ozone.recon.scm; -import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.CLOSED; -import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_SCM_PIPELINE_DB; - import java.io.File; import java.io.IOException; import java.util.List; import java.util.stream.Collectors; -import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -35,10 +32,12 @@ import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.recon.ReconUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import com.google.common.annotations.VisibleForTesting; +import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.CLOSED; +import static org.apache.hadoop.ozone.recon.ReconConstants.RECON_SCM_PIPELINE_DB; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Recon's overriding implementation of SCM's Pipeline Manager. 
@@ -48,7 +47,7 @@ public class ReconPipelineManager extends SCMPipelineManager {
   private static final Logger LOG =
       LoggerFactory.getLogger(ReconPipelineManager.class);
 
-  public ReconPipelineManager(Configuration conf,
+  public ReconPipelineManager(ConfigurationSource conf,
       NodeManager nodeManager,
       EventPublisher eventPublisher)
       throws IOException {
@@ -58,7 +57,7 @@ public ReconPipelineManager(Configuration conf,
   }
 
   @Override
-  protected File getPipelineDBPath(Configuration conf) {
+  protected File getPipelineDBPath(ConfigurationSource conf) {
     File metaDir = ReconUtils.getReconScmDbDir(conf);
     return new File(metaDir, RECON_SCM_PIPELINE_DB);
   }
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java
index e3b8c02fe58..246d9baf74e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineReportHandler.java
@@ -20,7 +20,7 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -46,7 +46,7 @@ public class ReconPipelineReportHandler extends PipelineReportHandler {
 
   public ReconPipelineReportHandler(SafeModeManager scmSafeModeManager,
       PipelineManager pipelineManager,
-      Configuration conf,
+      ConfigurationSource conf,
       StorageContainerServiceProvider scmServiceProvider) {
     super(scmSafeModeManager, pipelineManager, conf);
     this.scmServiceProvider = scmServiceProvider;
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
index b164e54d7d8..9a7d737eeb5 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java
@@ -19,7 +19,7 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.server.http.BaseHttpServer;
 
 /**
@@ -32,7 +32,7 @@ public class S3GatewayHttpServer extends BaseHttpServer {
    */
   public static final int FILTER_PRIORITY_DO_AFTER = 50;
 
-  public S3GatewayHttpServer(Configuration conf,
+  public S3GatewayHttpServer(ConfigurationSource conf,
       String name) throws IOException {
     super(conf, name);
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java
index 3df8a095143..eebc3884ff3 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java
@@ -19,10 +19,10 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.cli.GenericCli;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient;
@@ -105,7 +105,7 @@ public ScmClient createScmClient() {
     }
   }
 
-  private void checkAndSetSCMAddressArg(Configuration conf) {
+  private void checkAndSetSCMAddressArg(ConfigurationSource conf) {
     if (StringUtils.isNotEmpty(scm)) {
       conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scm);
     }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java
index c8deb98759e..e4e6b1f7fa4 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonHttpServer.java
@@ -19,7 +19,7 @@
 
 import java.io.IOException;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.server.http.BaseHttpServer;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 
@@ -27,7 +27,7 @@
  * Http server to provide metrics + profile endpoint.
  */
 public class FreonHttpServer extends BaseHttpServer {
-  public FreonHttpServer(Configuration conf) throws IOException {
+  public FreonHttpServer(ConfigurationSource conf) throws IOException {
     super(conf, "freon");
   }
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
index 50b0c8e7987..d7b357600fd 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchMarkDatanodeDispatcher.java
@@ -26,7 +26,6 @@
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -45,13 +44,13 @@
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 
 import com.google.common.collect.Maps;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.Level;
@@ -92,7 +91,7 @@ public void initialize() throws IOException {
     // 1 MB of data
     data = ByteString.copyFromUtf8(RandomStringUtils.randomAscii(CHUNK_SIZE));
     random = new Random();
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     baseDir = System.getProperty("java.io.tmpdir") + File.separator
         + datanodeUuid;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchmarkChunkManager.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchmarkChunkManager.java
index 0a80ad3660b..c9d2b5e7a28 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchmarkChunkManager.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/BenchmarkChunkManager.java
@@ -17,8 +17,14 @@
  */
 package org.apache.hadoop.ozone.genesis;
 
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.file.Files;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
@@ -32,9 +38,15 @@
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy;
 import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy;
+import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import org.apache.commons.io.FileUtils;
+import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
+import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
 import org.openjdk.jmh.annotations.Benchmark;
 import org.openjdk.jmh.annotations.Level;
 import org.openjdk.jmh.annotations.Measurement;
@@ -46,19 +58,6 @@
 import org.openjdk.jmh.annotations.Warmup;
 import org.openjdk.jmh.infra.Blackhole;
 
-import java.io.File;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.file.Files;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
-import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric;
-import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
-import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
-
 /**
  * Benchmark for ChunkManager implementations.
  */
@@ -99,7 +98,7 @@ public static class BenchmarkState {
     private File dir;
     private ChunkBuffer buffer;
     private VolumeSet volumeSet;
-    private Configuration config;
+    private OzoneConfiguration config;
 
     private static File getTestDir() throws IOException {
       File dir = new File(DEFAULT_TEST_DATA_DIR).getAbsoluteFile();
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
index 25ceb4ce5e3..e98cabca6b7 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java
@@ -17,9 +17,17 @@
  */
 package org.apache.hadoop.ozone.genesis;
 
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.UUID;
+
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -30,24 +38,16 @@
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.ServerUtils;
+import org.apache.hadoop.hdds.utils.MetadataStore;
+import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Storage;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OMStorage;
 import org.apache.hadoop.ozone.om.OzoneManager;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
-
-import java.io.File;
-import java.io.IOException;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.UUID;
+import org.apache.commons.lang3.RandomStringUtils;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
 import static org.apache.hadoop.ozone.OzoneConsts.SCM_PIPELINE_DB;
@@ -77,7 +77,7 @@ public static Path getTempPath() {
 
   public static MetadataStore getMetadataStore(String dbType)
       throws IOException {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     MetadataStoreBuilder builder = MetadataStoreBuilder.newBuilder();
     builder.setConf(conf);
     builder.setCreateIfMissing(true);
@@ -137,7 +137,7 @@ static StorageContainerManager getScm(OzoneConfiguration conf,
     return new StorageContainerManager(conf, configurator);
   }
 
-  static void configureSCM(Configuration conf, int numHandlers) {
+  static void configureSCM(OzoneConfiguration conf, int numHandlers) {
     conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY,
         RANDOM_LOCAL_ADDRESS);
     conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
@@ -150,7 +150,7 @@ static void configureSCM(Configuration conf, int numHandlers) {
   }
 
   static void addPipelines(HddsProtos.ReplicationFactor factor,
-      int numPipelines, Configuration conf) throws IOException {
+      int numPipelines, ConfigurationSource conf) throws IOException {
     final File metaDir = ServerUtils.getScmDbDir(conf);
     final File pipelineDBPath = new File(metaDir, SCM_PIPELINE_DB);
     int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
@@ -194,7 +194,7 @@ static OzoneManager getOm(OzoneConfiguration conf)
     return OzoneManager.createOm(conf);
   }
 
-  static void configureOM(Configuration conf, int numHandlers) {
+  static void configureOM(OzoneConfiguration conf, int numHandlers) {
     conf.set(OMConfigKeys.OZONE_OM_HTTP_ADDRESS_KEY, RANDOM_LOCAL_ADDRESS);
     conf.setInt(OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY, numHandlers);