methodNameToPolicyMap) {
this.proxyProvider = proxyProvider;
- this.defaultPolicy = RetryPolicies.TRY_ONCE_THEN_FAIL;
+ this.defaultPolicy = defaultPolicy;
this.methodNameToPolicyMap = methodNameToPolicyMap;
this.currentProxy = proxyProvider.getProxy();
}
+ @Override
public Object invoke(Object proxy, Method method, Object[] args)
throws Throwable {
RetryPolicy policy = methodNameToPolicyMap.get(method.getName());
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
index 2be8b75999..8b8387ce2c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
@@ -22,10 +22,13 @@
import java.net.NoRouteToHostException;
import java.net.SocketException;
import java.net.UnknownHostException;
+import java.util.ArrayList;
+import java.util.Collections;
import java.util.HashMap;
+import java.util.List;
import java.util.Map;
-import java.util.Random;
import java.util.Map.Entry;
+import java.util.Random;
import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
@@ -33,8 +36,6 @@
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;
-import com.google.common.annotations.VisibleForTesting;
-
/**
*
* A collection of useful implementations of {@link RetryPolicy}.
@@ -44,7 +45,12 @@ public class RetryPolicies {
public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
- private static final Random RAND = new Random();
+  private static ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
+ @Override
+ protected Random initialValue() {
+ return new Random();
+ }
+ };
/**
*
@@ -157,17 +163,35 @@ public RetryAction shouldRetry(Exception e, int retries, int failovers,
}
}
+ /**
+ * Retry up to maxRetries.
+ * The actual sleep time of the n-th retry is f(n, sleepTime),
+ * where f is a function provided by the subclass implementation.
+ *
+   * Objects of the subclasses should be immutable;
+ * otherwise, the subclass must override hashCode(), equals(..) and toString().
+ */
static abstract class RetryLimited implements RetryPolicy {
- int maxRetries;
- long sleepTime;
- TimeUnit timeUnit;
+ final int maxRetries;
+ final long sleepTime;
+ final TimeUnit timeUnit;
- public RetryLimited(int maxRetries, long sleepTime, TimeUnit timeUnit) {
+ private String myString;
+
+ RetryLimited(int maxRetries, long sleepTime, TimeUnit timeUnit) {
+ if (maxRetries < 0) {
+ throw new IllegalArgumentException("maxRetries = " + maxRetries+" < 0");
+ }
+ if (sleepTime < 0) {
+ throw new IllegalArgumentException("sleepTime = " + sleepTime + " < 0");
+ }
+
this.maxRetries = maxRetries;
this.sleepTime = sleepTime;
this.timeUnit = timeUnit;
}
+ @Override
public RetryAction shouldRetry(Exception e, int retries, int failovers,
boolean isMethodIdempotent) throws Exception {
if (retries >= maxRetries) {
@@ -178,6 +202,30 @@ public RetryAction shouldRetry(Exception e, int retries, int failovers,
}
protected abstract long calculateSleepTime(int retries);
+
+ @Override
+ public int hashCode() {
+ return toString().hashCode();
+ }
+
+ @Override
+ public boolean equals(final Object that) {
+ if (this == that) {
+ return true;
+ } else if (that == null || this.getClass() != that.getClass()) {
+ return false;
+ }
+ return this.toString().equals(that.toString());
+ }
+
+ @Override
+ public String toString() {
+ if (myString == null) {
+ myString = getClass().getSimpleName() + "(maxRetries=" + maxRetries
+ + ", sleepTime=" + sleepTime + " " + timeUnit + ")";
+ }
+ return myString;
+ }
}
static class RetryUpToMaximumCountWithFixedSleep extends RetryLimited {
@@ -208,6 +256,169 @@ protected long calculateSleepTime(int retries) {
}
}
+ /**
+ * Given pairs of number of retries and sleep time (n0, t0), (n1, t1), ...,
+ * the first n0 retries sleep t0 milliseconds on average,
+ * the following n1 retries sleep t1 milliseconds on average, and so on.
+ *
+   * For each sleep, the actual sleep time is uniformly distributed
+   * in the closed interval [0.5t, 1.5t], where t is the sleep time specified.
+ *
+ * The objects of this class are immutable.
+ */
+ public static class MultipleLinearRandomRetry implements RetryPolicy {
+    /** Pairs of numRetries and sleepMillis */
+ public static class Pair {
+ final int numRetries;
+ final int sleepMillis;
+
+ public Pair(final int numRetries, final int sleepMillis) {
+ if (numRetries < 0) {
+ throw new IllegalArgumentException("numRetries = " + numRetries+" < 0");
+ }
+ if (sleepMillis < 0) {
+ throw new IllegalArgumentException("sleepMillis = " + sleepMillis + " < 0");
+ }
+
+ this.numRetries = numRetries;
+ this.sleepMillis = sleepMillis;
+ }
+
+ @Override
+ public String toString() {
+ return numRetries + "x" + sleepMillis + "ms";
+ }
+ }
+
+    private final List<Pair> pairs;
+ private String myString;
+
+    public MultipleLinearRandomRetry(List<Pair> pairs) {
+ if (pairs == null || pairs.isEmpty()) {
+ throw new IllegalArgumentException("pairs must be neither null nor empty.");
+ }
+ this.pairs = Collections.unmodifiableList(pairs);
+ }
+
+ @Override
+ public RetryAction shouldRetry(Exception e, int curRetry, int failovers,
+ boolean isMethodIdempotent) throws Exception {
+ final Pair p = searchPair(curRetry);
+ if (p == null) {
+ //no more retries.
+ return RetryAction.FAIL;
+ }
+
+ //calculate sleep time and return.
+ final double ratio = RANDOM.get().nextDouble() + 0.5;//0.5 <= ratio <=1.5
+ final long sleepTime = Math.round(p.sleepMillis * ratio);
+ return new RetryAction(RetryAction.RetryDecision.RETRY, sleepTime);
+ }
+
+ /**
+ * Given the current number of retry, search the corresponding pair.
+ * @return the corresponding pair,
+ * or null if the current number of retry > maximum number of retry.
+ */
+ private Pair searchPair(int curRetry) {
+ int i = 0;
+ for(; i < pairs.size() && curRetry > pairs.get(i).numRetries; i++) {
+ curRetry -= pairs.get(i).numRetries;
+ }
+ return i == pairs.size()? null: pairs.get(i);
+ }
+
+ @Override
+ public int hashCode() {
+ return toString().hashCode();
+ }
+
+ @Override
+ public boolean equals(final Object that) {
+ if (this == that) {
+ return true;
+ } else if (that == null || this.getClass() != that.getClass()) {
+ return false;
+ }
+ return this.toString().equals(that.toString());
+ }
+
+ @Override
+ public String toString() {
+ if (myString == null) {
+ myString = getClass().getSimpleName() + pairs;
+ }
+ return myString;
+ }
+
+ /**
+ * Parse the given string as a MultipleLinearRandomRetry object.
+ * The format of the string is "t_1, n_1, t_2, n_2, ...",
+     * where t_i and n_i are the i-th pair of sleep time and number of retries.
+ * Note that the white spaces in the string are ignored.
+ *
+ * @return the parsed object, or null if the parsing fails.
+ */
+ public static MultipleLinearRandomRetry parseCommaSeparatedString(String s) {
+ final String[] elements = s.split(",");
+ if (elements.length == 0) {
+ LOG.warn("Illegal value: there is no element in \"" + s + "\".");
+ return null;
+ }
+ if (elements.length % 2 != 0) {
+ LOG.warn("Illegal value: the number of elements in \"" + s + "\" is "
+ + elements.length + " but an even number of elements is expected.");
+ return null;
+ }
+
+      final List<RetryPolicies.MultipleLinearRandomRetry.Pair> pairs
+          = new ArrayList<RetryPolicies.MultipleLinearRandomRetry.Pair>();
+
+ for(int i = 0; i < elements.length; ) {
+ //parse the i-th sleep-time
+ final int sleep = parsePositiveInt(elements, i++, s);
+ if (sleep == -1) {
+ return null; //parse fails
+ }
+
+ //parse the i-th number-of-retries
+ final int retries = parsePositiveInt(elements, i++, s);
+ if (retries == -1) {
+ return null; //parse fails
+ }
+
+ pairs.add(new RetryPolicies.MultipleLinearRandomRetry.Pair(retries, sleep));
+ }
+ return new RetryPolicies.MultipleLinearRandomRetry(pairs);
+ }
+
+ /**
+ * Parse the i-th element as an integer.
+ * @return -1 if the parsing fails or the parsed value <= 0;
+ * otherwise, return the parsed value.
+ */
+ private static int parsePositiveInt(final String[] elements,
+ final int i, final String originalString) {
+ final String s = elements[i].trim();
+ final int n;
+ try {
+ n = Integer.parseInt(s);
+ } catch(NumberFormatException nfe) {
+ LOG.warn("Failed to parse \"" + s + "\", which is the index " + i
+ + " element in \"" + originalString + "\"", nfe);
+ return -1;
+ }
+
+ if (n <= 0) {
+ LOG.warn("The value " + n + " <= 0: it is parsed from the string \""
+ + s + "\" which is the index " + i + " element in \""
+ + originalString + "\"");
+ return -1;
+ }
+ return n;
+ }
+ }
+
static class ExceptionDependentRetry implements RetryPolicy {
RetryPolicy defaultPolicy;
@@ -265,6 +476,14 @@ static class ExponentialBackoffRetry extends RetryLimited {
public ExponentialBackoffRetry(
int maxRetries, long sleepTime, TimeUnit timeUnit) {
super(maxRetries, sleepTime, timeUnit);
+
+ if (maxRetries < 0) {
+ throw new IllegalArgumentException("maxRetries = " + maxRetries + " < 0");
+ } else if (maxRetries >= Long.SIZE - 1) {
+ //calculateSleepTime may overflow.
+ throw new IllegalArgumentException("maxRetries = " + maxRetries
+ + " >= " + (Long.SIZE - 1));
+ }
}
@Override
@@ -353,11 +572,10 @@ public RetryAction shouldRetry(Exception e, int retries,
* @param cap value at which to cap the base sleep time
* @return an amount of time to sleep
*/
- @VisibleForTesting
- public static long calculateExponentialTime(long time, int retries,
+ private static long calculateExponentialTime(long time, int retries,
long cap) {
- long baseTime = Math.min(time * ((long)1 << retries), cap);
- return (long) (baseTime * (RAND.nextFloat() + 0.5));
+ long baseTime = Math.min(time * (1L << retries), cap);
+ return (long) (baseTime * (RANDOM.get().nextDouble() + 0.5));
}
private static long calculateExponentialTime(long time, int retries) {
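
For illustration only (this snippet is not part of the patch), the comma-separated form accepted by parseCommaSeparatedString could be used roughly as follows; the sample values are made up:

    // Sketch: "sleepMillis, numRetries" pairs, read left to right.
    RetryPolicy p = RetryPolicies.MultipleLinearRandomRetry
        .parseCommaSeparatedString("10000, 6, 60000, 10");
    // First 6 retries sleep ~10s each, the next 10 retries sleep ~60s each;
    // every sleep is randomized within [0.5t, 1.5t]. Malformed input yields
    // null (with a warning) rather than an exception.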
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java
index ed673e950f..e1f3899457 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java
@@ -60,6 +60,12 @@ public RetryAction(RetryDecision action, long delayTime, String reason) {
this.reason = reason;
}
+ @Override
+ public String toString() {
+ return getClass().getSimpleName() + "(action=" + action
+ + ", delayMillis=" + delayMillis + ", reason=" + reason + ")";
+ }
+
public enum RetryDecision {
FAIL,
RETRY,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
index 13e8a41eba..3cc6a2ec2d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryProxy.java
@@ -75,9 +75,10 @@ public static Object create(Class> iface, FailoverProxyProvider proxyProvider,
*/
public static Object create(Class<?> iface, Object implementation,
Map<String, RetryPolicy> methodNameToPolicyMap) {
- return RetryProxy.create(iface,
+ return create(iface,
new DefaultFailoverProxyProvider(iface, implementation),
- methodNameToPolicyMap);
+ methodNameToPolicyMap,
+ RetryPolicies.TRY_ONCE_THEN_FAIL);
}
/**
@@ -92,11 +93,13 @@ public static Object create(Class> iface, Object implementation,
* @return the retry proxy
*/
public static Object create(Class<?> iface, FailoverProxyProvider proxyProvider,
-      Map<String, RetryPolicy> methodNameToPolicyMap) {
+      Map<String, RetryPolicy> methodNameToPolicyMap,
+ RetryPolicy defaultPolicy) {
return Proxy.newProxyInstance(
proxyProvider.getInterface().getClassLoader(),
new Class<?>[] { iface },
- new RetryInvocationHandler(proxyProvider, methodNameToPolicyMap)
+ new RetryInvocationHandler(proxyProvider, defaultPolicy,
+ methodNameToPolicyMap)
);
}
}
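
A hedged usage sketch of the new four-argument create(..); MyProtocol and underlyingImpl are hypothetical names, not part of this change:

    Map<String, RetryPolicy> methodPolicies = new HashMap<String, RetryPolicy>();
    methodPolicies.put("rename", RetryPolicies.RETRY_FOREVER);
    MyProtocol proxy = (MyProtocol) RetryProxy.create(MyProtocol.class,
        new DefaultFailoverProxyProvider(MyProtocol.class, underlyingImpl),
        methodPolicies,
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(3, 1, TimeUnit.SECONDS));
    // Methods absent from methodPolicies now fall back to the supplied default
    // policy instead of the previously hard-coded TRY_ONCE_THEN_FAIL.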
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index ef32cfde3a..d382c99f61 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -18,47 +18,51 @@
package org.apache.hadoop.ipc;
-import java.net.InetAddress;
-import java.net.Socket;
-import java.net.InetSocketAddress;
-import java.net.SocketTimeoutException;
-import java.net.UnknownHostException;
-import java.io.IOException;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
import java.io.FilterInputStream;
+import java.io.IOException;
import java.io.InputStream;
+import java.io.InterruptedIOException;
import java.io.OutputStream;
-
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.net.SocketTimeoutException;
+import java.net.UnknownHostException;
import java.security.PrivilegedExceptionAction;
import java.util.Hashtable;
import java.util.Iterator;
+import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
-import java.util.Map.Entry;
+import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicLong;
import javax.net.SocketFactory;
-import org.apache.commons.logging.*;
-
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SaslRpcClient;
@@ -67,8 +71,8 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenSelector;
import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
import org.apache.hadoop.util.ProtoUtil;
import org.apache.hadoop.util.ReflectionUtils;
@@ -80,8 +84,8 @@
*/
public class Client {
- public static final Log LOG =
- LogFactory.getLog(Client.class);
+ public static final Log LOG = LogFactory.getLog(Client.class);
+
private Hashtable<ConnectionId, Connection> connections =
new Hashtable<ConnectionId, Connection>();
@@ -228,8 +232,7 @@ private class Connection extends Thread {
private int rpcTimeout;
private int maxIdleTime; //connections will be culled if it was idle for
//maxIdleTime msecs
- private int maxRetries; //the max. no. of retries for socket connections
- // the max. no. of retries for socket connections on time out exceptions
+ private final RetryPolicy connectionRetryPolicy;
private int maxRetriesOnSocketTimeouts;
private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
private boolean doPing; //do we need to send ping message
@@ -253,7 +256,7 @@ public Connection(ConnectionId remoteId) throws IOException {
}
this.rpcTimeout = remoteId.getRpcTimeout();
this.maxIdleTime = remoteId.getMaxIdleTime();
- this.maxRetries = remoteId.getMaxRetries();
+ this.connectionRetryPolicy = remoteId.connectionRetryPolicy;
this.maxRetriesOnSocketTimeouts = remoteId.getMaxRetriesOnSocketTimeouts();
this.tcpNoDelay = remoteId.getTcpNoDelay();
this.doPing = remoteId.getDoPing();
@@ -488,7 +491,7 @@ private synchronized void setupConnection() throws IOException {
if (updateAddress()) {
timeoutFailures = ioFailures = 0;
}
- handleConnectionFailure(ioFailures++, maxRetries, ie);
+ handleConnectionFailure(ioFailures++, ie);
}
}
}
@@ -680,8 +683,36 @@ private void handleConnectionFailure(
Thread.sleep(1000);
} catch (InterruptedException ignored) {}
- LOG.info("Retrying connect to server: " + server +
- ". Already tried " + curRetries + " time(s).");
+ LOG.info("Retrying connect to server: " + server + ". Already tried "
+ + curRetries + " time(s); maxRetries=" + maxRetries);
+ }
+
+ private void handleConnectionFailure(int curRetries, IOException ioe
+ ) throws IOException {
+ closeConnection();
+
+ final RetryAction action;
+ try {
+ action = connectionRetryPolicy.shouldRetry(ioe, curRetries, 0, true);
+ } catch(Exception e) {
+ throw e instanceof IOException? (IOException)e: new IOException(e);
+ }
+ if (action.action == RetryAction.RetryDecision.FAIL) {
+ if (action.reason != null) {
+ LOG.warn("Failed to connect to server: " + server + ": "
+ + action.reason, ioe);
+ }
+ throw ioe;
+ }
+
+ try {
+ Thread.sleep(action.delayMillis);
+ } catch (InterruptedException e) {
+ throw (IOException)new InterruptedIOException("Interrupted: action="
+ + action + ", retry policy=" + connectionRetryPolicy).initCause(e);
+ }
+ LOG.info("Retrying connect to server: " + server + ". Already tried "
+ + curRetries + " time(s); retry policy is " + connectionRetryPolicy);
}
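
For reference, a small sketch of the contract the code above relies on, shown with a stock policy (expected values follow from the policy implementations; illustration only):

    RetryPolicy p = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        2, 100, TimeUnit.MILLISECONDS);
    RetryAction a0 = p.shouldRetry(new IOException("refused"), 0, 0, true);
    // a0.action == RetryDecision.RETRY, a0.delayMillis == 100
    RetryAction a2 = p.shouldRetry(new IOException("refused"), 2, 0, true);
    // a2.action == RetryDecision.FAIL once retries >= maxRetries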
/**
@@ -849,6 +880,10 @@ private void receiveResponse() {
try {
RpcResponseHeaderProto response =
RpcResponseHeaderProto.parseDelimitedFrom(in);
+ if (response == null) {
+ throw new IOException("Response is null.");
+ }
+
int callId = response.getCallId();
if (LOG.isDebugEnabled())
LOG.debug(getName() + " got value #" + callId);
@@ -1287,7 +1322,7 @@ public static class ConnectionId {
private final String serverPrincipal;
private final int maxIdleTime; //connections will be culled if it was idle for
//maxIdleTime msecs
- private final int maxRetries; //the max. no. of retries for socket connections
+ private final RetryPolicy connectionRetryPolicy;
// the max. no. of retries for socket connections on time out exceptions
private final int maxRetriesOnSocketTimeouts;
private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
@@ -1297,7 +1332,7 @@ public static class ConnectionId {
ConnectionId(InetSocketAddress address, Class<?> protocol,
UserGroupInformation ticket, int rpcTimeout,
String serverPrincipal, int maxIdleTime,
- int maxRetries, int maxRetriesOnSocketTimeouts,
+ RetryPolicy connectionRetryPolicy, int maxRetriesOnSocketTimeouts,
boolean tcpNoDelay, boolean doPing, int pingInterval) {
this.protocol = protocol;
this.address = address;
@@ -1305,7 +1340,7 @@ public static class ConnectionId {
this.rpcTimeout = rpcTimeout;
this.serverPrincipal = serverPrincipal;
this.maxIdleTime = maxIdleTime;
- this.maxRetries = maxRetries;
+ this.connectionRetryPolicy = connectionRetryPolicy;
this.maxRetriesOnSocketTimeouts = maxRetriesOnSocketTimeouts;
this.tcpNoDelay = tcpNoDelay;
this.doPing = doPing;
@@ -1336,10 +1371,6 @@ int getMaxIdleTime() {
return maxIdleTime;
}
- int getMaxRetries() {
- return maxRetries;
- }
-
/** max connection retries on socket time outs */
public int getMaxRetriesOnSocketTimeouts() {
return maxRetriesOnSocketTimeouts;
@@ -1357,6 +1388,12 @@ int getPingInterval() {
return pingInterval;
}
+ static ConnectionId getConnectionId(InetSocketAddress addr,
+      Class<?> protocol, UserGroupInformation ticket, int rpcTimeout,
+ Configuration conf) throws IOException {
+ return getConnectionId(addr, protocol, ticket, rpcTimeout, null, conf);
+ }
+
/**
* Returns a ConnectionId object.
* @param addr Remote address for the connection.
@@ -1367,9 +1404,18 @@ int getPingInterval() {
* @return A ConnectionId instance
* @throws IOException
*/
- public static ConnectionId getConnectionId(InetSocketAddress addr,
+ static ConnectionId getConnectionId(InetSocketAddress addr,
Class<?> protocol, UserGroupInformation ticket, int rpcTimeout,
- Configuration conf) throws IOException {
+ RetryPolicy connectionRetryPolicy, Configuration conf) throws IOException {
+
+ if (connectionRetryPolicy == null) {
+ final int max = conf.getInt(
+ CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
+ CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT);
+ connectionRetryPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+ max, 1, TimeUnit.SECONDS);
+ }
+
String remotePrincipal = getRemotePrincipal(conf, addr, protocol);
boolean doPing =
conf.getBoolean(CommonConfigurationKeys.IPC_CLIENT_PING_KEY, true);
@@ -1377,8 +1423,7 @@ public static ConnectionId getConnectionId(InetSocketAddress addr,
rpcTimeout, remotePrincipal,
conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT),
- conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
- CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT),
+ connectionRetryPolicy,
conf.getInt(
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT),
@@ -1421,7 +1466,7 @@ public boolean equals(Object obj) {
return isEqual(this.address, that.address)
&& this.doPing == that.doPing
&& this.maxIdleTime == that.maxIdleTime
- && this.maxRetries == that.maxRetries
+ && isEqual(this.connectionRetryPolicy, that.connectionRetryPolicy)
&& this.pingInterval == that.pingInterval
&& isEqual(this.protocol, that.protocol)
&& this.rpcTimeout == that.rpcTimeout
@@ -1434,11 +1479,10 @@ && isEqual(this.serverPrincipal, that.serverPrincipal)
@Override
public int hashCode() {
- int result = 1;
+ int result = connectionRetryPolicy.hashCode();
result = PRIME * result + ((address == null) ? 0 : address.hashCode());
result = PRIME * result + (doPing ? 1231 : 1237);
result = PRIME * result + maxIdleTime;
- result = PRIME * result + maxRetries;
result = PRIME * result + pingInterval;
result = PRIME * result + ((protocol == null) ? 0 : protocol.hashCode());
result = PRIME * result + rpcTimeout;
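
The hashCode/equals changes matter because ConnectionId keys the client connection pool: two logically identical retry policies must compare equal so callers created with equivalent settings share a connection. A small sketch (illustration only):

    RetryPolicy a = RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1, TimeUnit.SECONDS);
    RetryPolicy b = RetryPolicies.retryUpToMaximumCountWithFixedSleep(10, 1, TimeUnit.SECONDS);
    // RetryLimited bases equals/hashCode on toString(), so these hold:
    assert a.equals(b) && a.hashCode() == b.hashCode();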
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
index 1338419a17..d355a85d4f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
@@ -36,9 +36,9 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataOutputOutputStream;
import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.RPC.RpcInvoker;
-
import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
@@ -66,15 +66,24 @@ public class ProtobufRpcEngine implements RpcEngine {
private static final ClientCache CLIENTS = new ClientCache();
+  public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
+ InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
+ SocketFactory factory, int rpcTimeout) throws IOException {
+ return getProxy(protocol, clientVersion, addr, ticket, conf, factory,
+ rpcTimeout, null);
+ }
+
@Override
@SuppressWarnings("unchecked")
public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
- SocketFactory factory, int rpcTimeout) throws IOException {
+ SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy
+ ) throws IOException {
-    return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(protocol
- .getClassLoader(), new Class[] { protocol }, new Invoker(protocol,
- addr, ticket, conf, factory, rpcTimeout)), false);
+ final Invoker invoker = new Invoker(protocol, addr, ticket, conf, factory,
+ rpcTimeout, connectionRetryPolicy);
+    return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(
+ protocol.getClassLoader(), new Class[]{protocol}, invoker), false);
}
@Override
@@ -97,11 +106,12 @@ private static class Invoker implements RpcInvocationHandler {
private final long clientProtocolVersion;
private final String protocolName;
-    public Invoker(Class<?> protocol, InetSocketAddress addr,
+    private Invoker(Class<?> protocol, InetSocketAddress addr,
UserGroupInformation ticket, Configuration conf, SocketFactory factory,
- int rpcTimeout) throws IOException {
- this(protocol, Client.ConnectionId.getConnectionId(addr, protocol,
- ticket, rpcTimeout, conf), conf, factory);
+ int rpcTimeout, RetryPolicy connectionRetryPolicy) throws IOException {
+ this(protocol, Client.ConnectionId.getConnectionId(
+ addr, protocol, ticket, rpcTimeout, connectionRetryPolicy, conf),
+ conf, factory);
}
/**
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
index 56fbd7d5a1..6a8a71f83a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
@@ -41,6 +41,7 @@
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
import org.apache.hadoop.net.NetUtils;
@@ -326,7 +327,7 @@ public static ProtocolProxy waitForProtocolProxy(Class protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
long connTimeout) throws IOException {
- return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, connTimeout);
+ return waitForProtocolProxy(protocol, clientVersion, addr, conf, 0, null, connTimeout);
}
/**
@@ -347,7 +348,7 @@ public static T waitForProxy(Class protocol,
int rpcTimeout,
long timeout) throws IOException {
return waitForProtocolProxy(protocol, clientVersion, addr,
- conf, rpcTimeout, timeout).getProxy();
+ conf, rpcTimeout, null, timeout).getProxy();
}
/**
@@ -367,6 +368,7 @@ public static ProtocolProxy waitForProtocolProxy(Class protocol,
long clientVersion,
InetSocketAddress addr, Configuration conf,
int rpcTimeout,
+ RetryPolicy connectionRetryPolicy,
long timeout) throws IOException {
long startTime = System.currentTimeMillis();
IOException ioe;
@@ -374,7 +376,7 @@ public static ProtocolProxy waitForProtocolProxy(Class protocol,
try {
return getProtocolProxy(protocol, clientVersion, addr,
UserGroupInformation.getCurrentUser(), conf, NetUtils
- .getDefaultSocketFactory(conf), rpcTimeout);
+ .getDefaultSocketFactory(conf), rpcTimeout, connectionRetryPolicy);
} catch(ConnectException se) { // namenode has not been started
LOG.info("Server at " + addr + " not available yet, Zzzzz...");
ioe = se;
@@ -463,7 +465,7 @@ public static ProtocolProxy getProtocolProxy(Class protocol,
Configuration conf,
SocketFactory factory) throws IOException {
return getProtocolProxy(
- protocol, clientVersion, addr, ticket, conf, factory, 0);
+ protocol, clientVersion, addr, ticket, conf, factory, 0, null);
}
/**
@@ -489,7 +491,7 @@ public static T getProxy(Class protocol,
SocketFactory factory,
int rpcTimeout) throws IOException {
return getProtocolProxy(protocol, clientVersion, addr, ticket,
- conf, factory, rpcTimeout).getProxy();
+ conf, factory, rpcTimeout, null).getProxy();
}
/**
@@ -512,12 +514,13 @@ public static ProtocolProxy getProtocolProxy(Class protocol,
UserGroupInformation ticket,
Configuration conf,
SocketFactory factory,
- int rpcTimeout) throws IOException {
+ int rpcTimeout,
+ RetryPolicy connectionRetryPolicy) throws IOException {
if (UserGroupInformation.isSecurityEnabled()) {
SaslRpcServer.init(conf);
}
- return getProtocolEngine(protocol,conf).getProxy(protocol,
- clientVersion, addr, ticket, conf, factory, rpcTimeout);
+ return getProtocolEngine(protocol,conf).getProxy(protocol, clientVersion,
+ addr, ticket, conf, factory, rpcTimeout, connectionRetryPolicy);
}
/**
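
A hedged caller-side sketch of the new parameter (MyProtocol, addr and conf are hypothetical); note that, per the WritableRpcEngine change further down, only engines that thread the policy through, such as ProtobufRpcEngine, accept a non-null value:

    RetryPolicy connectPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        5, 200, TimeUnit.MILLISECONDS);
    ProtocolProxy<MyProtocol> pp = RPC.getProtocolProxy(MyProtocol.class, 1L, addr,
        UserGroupInformation.getCurrentUser(), conf,
        NetUtils.getDefaultSocketFactory(conf), 0, connectPolicy);
    MyProtocol client = pp.getProxy();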
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
index d431b4a898..f74aa881d2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RemoteException.java
@@ -97,8 +97,9 @@ public static RemoteException valueOf(Attributes attrs) {
return new RemoteException(attrs.getValue("class"),
attrs.getValue("message"));
}
-
+
+ @Override
public String toString() {
- return className + ": " + getMessage();
+ return getClass().getName() + "(" + className + "): " + getMessage();
}
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java
index 09980da452..5dc48adef2 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
@@ -40,7 +41,8 @@ public interface RpcEngine {
<T> ProtocolProxy<T> getProxy(Class<T> protocol,
long clientVersion, InetSocketAddress addr,
UserGroupInformation ticket, Configuration conf,
- SocketFactory factory, int rpcTimeout) throws IOException;
+ SocketFactory factory, int rpcTimeout,
+ RetryPolicy connectionRetryPolicy) throws IOException;
/** Expert: Make multiple, parallel calls to a set of servers. */
Object[] call(Method method, Object[][] params, InetSocketAddress[] addrs,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
index 2ebf42a9aa..f61f0f2fd7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
@@ -31,6 +31,7 @@
import org.apache.commons.logging.*;
import org.apache.hadoop.io.*;
+import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.ipc.Client.ConnectionId;
import org.apache.hadoop.ipc.RPC.RpcInvoker;
import org.apache.hadoop.ipc.VersionedProtocol;
@@ -259,9 +260,14 @@ static Client getClient(Configuration conf) {
public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
InetSocketAddress addr, UserGroupInformation ticket,
Configuration conf, SocketFactory factory,
- int rpcTimeout)
+ int rpcTimeout, RetryPolicy connectionRetryPolicy)
throws IOException {
+ if (connectionRetryPolicy != null) {
+ throw new UnsupportedOperationException(
+ "Not supported: connectionRetryPolicy=" + connectionRetryPolicy);
+ }
+
T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(),
new Class[] { protocol }, new Invoker(protocol, addr, ticket, conf,
factory, rpcTimeout));
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index da8fab2956..892ba07359 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -19,6 +19,7 @@
import java.util.ArrayList;
import java.util.Collection;
+import java.util.List;
import java.util.Random;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -55,8 +56,8 @@ public InvalidTopologyException(String msg) {
/** InnerNode represents a switch/router of a data center or rack.
* Different from a leaf node, it has non-null children.
*/
- private class InnerNode extends NodeBase {
-    private ArrayList<Node> children=new ArrayList<Node>();
+ static class InnerNode extends NodeBase {
+    protected List<Node> children=new ArrayList<Node>();
private int numOfLeaves;
/** Construct an InnerNode from a path-like string */
@@ -76,7 +77,7 @@ private class InnerNode extends NodeBase {
}
/** @return its children */
-    Collection<Node> getChildren() {return children;}
+    List<Node> getChildren() {return children;}
/** @return the number of children this node has */
int getNumOfChildren() {
@@ -182,7 +183,23 @@ boolean add(Node n) {
}
}
}
-
+
+ /**
+ * Creates a parent node to be added to the list of children.
+ * Creates a node using the InnerNode four argument constructor specifying
+ * the name, location, parent, and level of this node.
+ *
+ * To be overridden in subclasses for specific InnerNode implementations,
+ * as alternative to overriding the full {@link #add(Node)} method.
+ *
+ * @param parentName The name of the parent node
+ * @return A new inner node
+ * @see InnerNode#InnerNode(String, String, InnerNode, int)
+ */
+ protected InnerNode createParentNode(String parentName) {
+ return new InnerNode(parentName, getPath(this), this, this.getLevel()+1);
+ }
+
/** Remove node n from the subtree of this node
* @param n node to be deleted
* @return true if the node is deleted; false otherwise
@@ -263,7 +280,7 @@ private Node getLoc(String loc) {
* @param excludedNode an excluded node (can be null)
* @return
*/
- private Node getLeaf(int leafIndex, Node excludedNode) {
+ Node getLeaf(int leafIndex, Node excludedNode) {
int count=0;
// check if the excluded node a leaf
boolean isLeaf =
@@ -308,7 +325,21 @@ private Node getLeaf(int leafIndex, Node excludedNode) {
return null;
}
}
-
+
+ /**
+   * Determine if children are leaves; the default implementation calls {@link #isRack()}.
+   *
+   * To be overridden in subclasses for specific InnerNode implementations,
+ * as alternative to overriding the full {@link #getLeaf(int, Node)} method.
+ *
+ * @return true if children are leaves, false otherwise
+ */
+ protected boolean areChildrenLeaves() {
+ return isRack();
+ }
+
+ /**
+ * Get number of leaves.
+ */
int getNumOfLeaves() {
return numOfLeaves;
}
@@ -317,18 +348,18 @@ int getNumOfLeaves() {
/**
* the root cluster map
*/
- InnerNode clusterMap = new InnerNode(InnerNode.ROOT);
+ InnerNode clusterMap;
/** Depth of all leaf nodes */
private int depthOfAllLeaves = -1;
/** rack counter */
- private int numOfRacks = 0;
+ protected int numOfRacks = 0;
/** the lock used to manage access */
- private ReadWriteLock netlock;
-
+ protected ReadWriteLock netlock = new ReentrantReadWriteLock();
+
public NetworkTopology() {
- netlock = new ReentrantReadWriteLock();
+ clusterMap = new InnerNode(InnerNode.ROOT);
}
-
+
/** Add a leaf node
* Update node counter & rack counter if necessary
* @param node node to be added; can be null
@@ -344,7 +375,7 @@ public void add(Node node) {
}
netlock.writeLock().lock();
try {
- Node rack = getNode(node.getNetworkLocation());
+ Node rack = getNodeForNetworkLocation(node);
if (rack != null && !(rack instanceof InnerNode)) {
throw new IllegalArgumentException("Unexpected data node "
+ node.toString()
@@ -376,7 +407,26 @@ public void add(Node node) {
netlock.writeLock().unlock();
}
}
-
+
+ /**
+ * Return a reference to the node given its string representation.
+ * Default implementation delegates to {@link #getNode(String)}.
+ *
+ *
+   * To be overridden in subclasses for specific NetworkTopology
+ * implementations, as alternative to overriding the full {@link #add(Node)}
+ * method.
+ *
+ * @param node The string representation of this node's network location is
+ * used to retrieve a Node object.
+ * @return a reference to the node; null if the node is not in the tree
+ *
+ * @see #add(Node)
+ * @see #getNode(String)
+ */
+ protected Node getNodeForNetworkLocation(Node node) {
+ return getNode(node.getNetworkLocation());
+ }
+
/** Remove a node
* Update node counter and rack counter if necessary
* @param node node to be removed; can be null
@@ -403,7 +453,7 @@ public void remove(Node node) {
netlock.writeLock().unlock();
}
}
-
+
/** Check if the tree contains node node
*
* @param node a node
@@ -443,7 +493,21 @@ public Node getNode(String loc) {
netlock.readLock().unlock();
}
}
-
+
+  /** Given a string representation of a network location, return the rack
+   * it belongs to.
+ *
+ * To be overridden in subclasses for specific NetworkTopology
+ * implementations, as alternative to overriding the full
+ * {@link #getRack(String)} method.
+ * @param loc
+ * a path-like string representation of a network location
+ * @return a rack string
+ */
+ public String getRack(String loc) {
+ return loc;
+ }
+
/** @return the total number of racks */
public int getNumOfRacks() {
netlock.readLock().lock();
@@ -453,7 +517,7 @@ public int getNumOfRacks() {
netlock.readLock().unlock();
}
}
-
+
/** @return the total number of leaf nodes */
public int getNumOfLeaves() {
netlock.readLock().lock();
@@ -463,7 +527,7 @@ public int getNumOfLeaves() {
netlock.readLock().unlock();
}
}
-
+
/** Return the distance between two nodes
* It is assumed that the distance from one node to its parent is 1
* The distance between two nodes is calculated by summing up their distances
@@ -509,8 +573,8 @@ public int getDistance(Node node1, Node node2) {
return Integer.MAX_VALUE;
}
return dis+2;
- }
-
+ }
+
/** Check if two nodes are on the same rack
* @param node1 one node (can be null)
* @param node2 another node (can be null)
@@ -525,13 +589,44 @@ public boolean isOnSameRack( Node node1, Node node2) {
netlock.readLock().lock();
try {
- return node1.getParent()==node2.getParent();
+ return isSameParents(node1, node2);
} finally {
netlock.readLock().unlock();
}
}
-
- final private static Random r = new Random();
+
+ /**
+ * Check if network topology is aware of NodeGroup
+ */
+ public boolean isNodeGroupAware() {
+ return false;
+ }
+
+ /**
+   * Returns false as this topology is not NodeGroup aware; to be overridden in subclasses.
+ */
+ public boolean isOnSameNodeGroup(Node node1, Node node2) {
+ return false;
+ }
+
+ /**
+ * Compare the parents of each node for equality
+ *
+ *
+   * To be overridden in subclasses for specific NetworkTopology
+ * implementations, as alternative to overriding the full
+ * {@link #isOnSameRack(Node, Node)} method.
+ *
+ * @param node1 the first node to compare
+ * @param node2 the second node to compare
+ * @return true if their parents are equal, false otherwise
+ *
+ * @see #isOnSameRack(Node, Node)
+ */
+ protected boolean isSameParents(Node node1, Node node2) {
+ return node1.getParent()==node2.getParent();
+ }
+
+ final protected static Random r = new Random();
/** randomly choose one node from scope
* if scope starts with ~, choose one from the all nodes except for the
* ones in scope; otherwise, choose one from scope
@@ -550,7 +645,7 @@ public Node chooseRandom(String scope) {
netlock.readLock().unlock();
}
}
-
+
private Node chooseRandom(String scope, String excludedScope){
if (excludedScope != null) {
if (scope.startsWith(excludedScope)) {
@@ -579,7 +674,25 @@ private Node chooseRandom(String scope, String excludedScope){
int leaveIndex = r.nextInt(numOfDatanodes);
return innerNode.getLeaf(leaveIndex, node);
}
-
+
+ /** return leaves in scope
+ * @param scope a path string
+ * @return leaves nodes under specific scope
+ */
+  public List<Node> getLeaves(String scope) {
+    Node node = getNode(scope);
+    List<Node> leafNodes = new ArrayList<Node>();
+    if (!(node instanceof InnerNode)) {
+      leafNodes.add(node);
+    } else {
+      InnerNode innerNode = (InnerNode) node;
+      for (int i=0;i<innerNode.getNumOfLeaves();i++) {
+        leafNodes.add(innerNode.getLeaf(i, null));
+      }
+    }
+    return leafNodes;
+  }
+
  /** return the number of leaves in scope but not in excludedNodes
* if scope starts with ~, return the number of nodes that are not
* in scope and excludedNodes;
@@ -619,7 +732,7 @@ public int countNumOfAvailableNodes(String scope,
netlock.readLock().unlock();
}
}
-
+
/** convert a network tree to a string */
public String toString() {
// print the number of racks
@@ -640,13 +753,12 @@ public String toString() {
return tree.toString();
}
- /* swap two array items */
- static private void swap(Node[] nodes, int i, int j) {
+ /** swap two array items */
+ static protected void swap(Node[] nodes, int i, int j) {
Node tempNode;
tempNode = nodes[j];
nodes[j] = nodes[i];
nodes[i] = tempNode;
-
}
/** Sort nodes array by their distances to reader
@@ -697,4 +809,5 @@ public void pseudoSortByDistance( Node reader, Node[] nodes ) {
swap(nodes, 0, r.nextInt(nodes.length));
}
}
+
}
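
A small sketch of the new getLeaves(..) accessor using made-up rack paths (illustration only, not part of the patch):

    NetworkTopology topo = new NetworkTopology();
    topo.add(new NodeBase("h1", "/d1/r1"));
    topo.add(new NodeBase("h2", "/d1/r1"));
    topo.add(new NodeBase("h3", "/d1/r2"));
    List<Node> rack1 = topo.getLeaves("/d1/r1");   // h1 and h2
    // "~" scope counts everything outside /d1/r1 (here: just h3).
    int others = topo.countNumOfAvailableNodes("~/d1/r1", new ArrayList<Node>());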
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
new file mode 100644
index 0000000000..6066cd2a61
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopologyWithNodeGroup.java
@@ -0,0 +1,398 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * This class extends NetworkTopology to represent a cluster of computers with
+ * a four-layer hierarchical network topology.
+ * In this network topology, leaves represent data nodes (computers) and inner
+ * nodes represent switches/routers that manage traffic in/out of data centers,
+ * racks, or physical hosts (with a virtual switch).
+ *
+ * @see NetworkTopology
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+public class NetworkTopologyWithNodeGroup extends NetworkTopology {
+
+ public final static String DEFAULT_NODEGROUP = "/default-nodegroup";
+
+ public NetworkTopologyWithNodeGroup() {
+ clusterMap = new InnerNodeWithNodeGroup(InnerNode.ROOT);
+ }
+
+ @Override
+ protected Node getNodeForNetworkLocation(Node node) {
+ // if node only with default rack info, here we need to add default
+ // nodegroup info
+ if (NetworkTopology.DEFAULT_RACK.equals(node.getNetworkLocation())) {
+ node.setNetworkLocation(node.getNetworkLocation()
+ + DEFAULT_NODEGROUP);
+ }
+ Node nodeGroup = getNode(node.getNetworkLocation());
+ if (nodeGroup == null) {
+ nodeGroup = new InnerNode(node.getNetworkLocation());
+ }
+ return getNode(nodeGroup.getNetworkLocation());
+ }
+
+ @Override
+ public String getRack(String loc) {
+ netlock.readLock().lock();
+ try {
+ loc = InnerNode.normalize(loc);
+ Node locNode = getNode(loc);
+ if (locNode instanceof InnerNodeWithNodeGroup) {
+ InnerNodeWithNodeGroup node = (InnerNodeWithNodeGroup) locNode;
+ if (node.isRack()) {
+ return loc;
+ } else if (node.isNodeGroup()) {
+ return node.getNetworkLocation();
+ } else {
+ // may be a data center
+ return null;
+ }
+ } else {
+ // not in cluster map, don't handle it
+ return loc;
+ }
+ } finally {
+ netlock.readLock().unlock();
+ }
+ }
+
+ /**
+ * Given a string representation of a node group for a specific network
+ * location
+ *
+ * @param loc
+ * a path-like string representation of a network location
+ * @return a node group string
+ */
+ public String getNodeGroup(String loc) {
+ netlock.readLock().lock();
+ try {
+ loc = InnerNode.normalize(loc);
+ Node locNode = getNode(loc);
+ if (locNode instanceof InnerNodeWithNodeGroup) {
+ InnerNodeWithNodeGroup node = (InnerNodeWithNodeGroup) locNode;
+ if (node.isNodeGroup()) {
+ return loc;
+ } else if (node.isRack()) {
+ // not sure the node group for a rack
+ return null;
+ } else {
+ // may be a leaf node
+ return getNodeGroup(node.getNetworkLocation());
+ }
+ } else {
+ // not in cluster map, don't handle it
+ return loc;
+ }
+ } finally {
+ netlock.readLock().unlock();
+ }
+ }
+
+ @Override
+ public boolean isOnSameRack( Node node1, Node node2) {
+ if (node1 == null || node2 == null ||
+ node1.getParent() == null || node2.getParent() == null) {
+ return false;
+ }
+
+ netlock.readLock().lock();
+ try {
+ return isSameParents(node1.getParent(), node2.getParent());
+ } finally {
+ netlock.readLock().unlock();
+ }
+ }
+
+ /**
+   * Check if two nodes are on the same node group (hypervisor). The
+   * assumption here is that both nodes are leaf nodes.
+ *
+ * @param node1
+ * one node (can be null)
+ * @param node2
+ * another node (can be null)
+ * @return true if node1 and node2 are on the same node group; false
+ * otherwise
+ * @exception IllegalArgumentException
+ * when either node1 or node2 is null, or node1 or node2 do
+ * not belong to the cluster
+ */
+ @Override
+ public boolean isOnSameNodeGroup(Node node1, Node node2) {
+ if (node1 == null || node2 == null) {
+ return false;
+ }
+ netlock.readLock().lock();
+ try {
+ return isSameParents(node1, node2);
+ } finally {
+ netlock.readLock().unlock();
+ }
+ }
+
+ /**
+ * Check if network topology is aware of NodeGroup
+ */
+ @Override
+ public boolean isNodeGroupAware() {
+ return true;
+ }
+
+ /** Add a leaf node
+ * Update node counter & rack counter if necessary
+ * @param node node to be added; can be null
+   * @exception IllegalArgumentException if adding a node to a leaf node,
+   *                                     or if the node to be added is not a leaf
+ */
+ @Override
+ public void add(Node node) {
+ if (node==null) return;
+ if( node instanceof InnerNode ) {
+ throw new IllegalArgumentException(
+ "Not allow to add an inner node: "+NodeBase.getPath(node));
+ }
+ netlock.writeLock().lock();
+ try {
+ Node rack = null;
+
+ // if node only with default rack info, here we need to add default
+ // nodegroup info
+ if (NetworkTopology.DEFAULT_RACK.equals(node.getNetworkLocation())) {
+ node.setNetworkLocation(node.getNetworkLocation() +
+ NetworkTopologyWithNodeGroup.DEFAULT_NODEGROUP);
+ }
+ Node nodeGroup = getNode(node.getNetworkLocation());
+ if (nodeGroup == null) {
+ nodeGroup = new InnerNodeWithNodeGroup(node.getNetworkLocation());
+ }
+ rack = getNode(nodeGroup.getNetworkLocation());
+
+ if (rack != null && !(rack instanceof InnerNode)) {
+ throw new IllegalArgumentException("Unexpected data node "
+ + node.toString()
+ + " at an illegal network location");
+ }
+ if (clusterMap.add(node)) {
+ LOG.info("Adding a new node: " + NodeBase.getPath(node));
+ if (rack == null) {
+ // We only track rack number here
+ numOfRacks++;
+ }
+ }
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("NetworkTopology became:\n" + this.toString());
+ }
+ } finally {
+ netlock.writeLock().unlock();
+ }
+ }
+
+ /** Remove a node
+ * Update node counter and rack counter if necessary
+ * @param node node to be removed; can be null
+ */
+ @Override
+ public void remove(Node node) {
+ if (node==null) return;
+ if( node instanceof InnerNode ) {
+ throw new IllegalArgumentException(
+ "Not allow to remove an inner node: "+NodeBase.getPath(node));
+ }
+ LOG.info("Removing a node: "+NodeBase.getPath(node));
+ netlock.writeLock().lock();
+ try {
+ if (clusterMap.remove(node)) {
+ Node nodeGroup = getNode(node.getNetworkLocation());
+ if (nodeGroup == null) {
+ nodeGroup = new InnerNode(node.getNetworkLocation());
+ }
+ InnerNode rack = (InnerNode)getNode(nodeGroup.getNetworkLocation());
+ if (rack == null) {
+ numOfRacks--;
+ }
+ }
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("NetworkTopology became:\n" + this.toString());
+ }
+ } finally {
+ netlock.writeLock().unlock();
+ }
+ }
+
+ /** Sort nodes array by their distances to reader
+ * It linearly scans the array, if a local node is found, swap it with
+ * the first element of the array.
+ * If a local node group node is found, swap it with the first element
+ * following the local node.
+ * If a local rack node is found, swap it with the first element following
+ * the local node group node.
+ * If neither local node, node group node or local rack node is found, put a
+ * random replica location at position 0.
+ * It leaves the rest nodes untouched.
+ * @param reader the node that wishes to read a block from one of the nodes
+ * @param nodes the list of nodes containing data for the reader
+ */
+ @Override
+ public void pseudoSortByDistance( Node reader, Node[] nodes ) {
+
+ if (reader != null && !this.contains(reader)) {
+ // if reader is not a datanode (not in NetworkTopology tree), we will
+ // replace this reader with a sibling leaf node in tree.
+ Node nodeGroup = getNode(reader.getNetworkLocation());
+ if (nodeGroup != null && nodeGroup instanceof InnerNode) {
+ InnerNode parentNode = (InnerNode) nodeGroup;
+ // replace reader with the first children of its parent in tree
+ reader = parentNode.getLeaf(0, null);
+ } else {
+ return;
+ }
+ }
+ int tempIndex = 0;
+ int localRackNode = -1;
+ int localNodeGroupNode = -1;
+ if (reader != null) {
+ //scan the array to find the local node & local rack node
+ for (int i = 0; i < nodes.length; i++) {
+ if (tempIndex == 0 && reader == nodes[i]) { //local node
+ //swap the local node and the node at position 0
+ if (i != 0) {
+ swap(nodes, tempIndex, i);
+ }
+ tempIndex=1;
+
+ if (localRackNode != -1 && (localNodeGroupNode !=-1)) {
+ if (localRackNode == 0) {
+ localRackNode = i;
+ }
+ if (localNodeGroupNode == 0) {
+ localNodeGroupNode = i;
+ }
+ break;
+ }
+ } else if (localNodeGroupNode == -1 && isOnSameNodeGroup(reader,
+ nodes[i])) {
+ //local node group
+ localNodeGroupNode = i;
+ // node local and rack local are already found
+ if(tempIndex != 0 && localRackNode != -1) break;
+ } else if (localRackNode == -1 && isOnSameRack(reader, nodes[i])) {
+ localRackNode = i;
+ if (tempIndex != 0 && localNodeGroupNode != -1) break;
+ }
+ }
+
+ // swap the local nodegroup node and the node at position tempIndex
+ if(localNodeGroupNode != -1 && localNodeGroupNode != tempIndex) {
+ swap(nodes, tempIndex, localNodeGroupNode);
+ if (localRackNode == tempIndex) {
+ localRackNode = localNodeGroupNode;
+ }
+ tempIndex++;
+ }
+
+ // swap the local rack node and the node at position tempIndex
+ if(localRackNode != -1 && localRackNode != tempIndex) {
+ swap(nodes, tempIndex, localRackNode);
+ tempIndex++;
+ }
+ }
+
+ // put a random node at position 0 if there is not a local/local-nodegroup/
+ // local-rack node
+ if (tempIndex == 0 && localNodeGroupNode == -1 && localRackNode == -1
+ && nodes.length != 0) {
+ swap(nodes, 0, r.nextInt(nodes.length));
+ }
+ }
+
+ /** InnerNodeWithNodeGroup represents a switch/router of a data center, rack
+ * or physical host. Different from a leaf node, it has non-null children.
+ */
+ static class InnerNodeWithNodeGroup extends InnerNode {
+ public InnerNodeWithNodeGroup(String name, String location,
+ InnerNode parent, int level) {
+ super(name, location, parent, level);
+ }
+
+ public InnerNodeWithNodeGroup(String name, String location) {
+ super(name, location);
+ }
+
+ public InnerNodeWithNodeGroup(String path) {
+ super(path);
+ }
+
+ @Override
+ boolean isRack() {
+ // it is node group
+ if (getChildren().isEmpty()) {
+ return false;
+ }
+
+ Node firstChild = children.get(0);
+
+ if (firstChild instanceof InnerNode) {
+ Node firstGrandChild = (((InnerNode) firstChild).children).get(0);
+ if (firstGrandChild instanceof InnerNode) {
+ // it is datacenter
+ return false;
+ } else {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ /**
+ * Judge if this node represents a node group
+ *
+ * @return true if it has no child or its children are not InnerNodes
+ */
+ boolean isNodeGroup() {
+ if (children.isEmpty()) {
+ return true;
+ }
+ Node firstChild = children.get(0);
+ if (firstChild instanceof InnerNode) {
+ // it is rack or datacenter
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ protected InnerNode createParentNode(String parentName) {
+ return new InnerNodeWithNodeGroup(parentName, getPath(this), this,
+ this.getLevel() + 1);
+ }
+
+ @Override
+ protected boolean areChildrenLeaves() {
+ return isNodeGroup();
+ }
+ }
+}
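
A hedged usage sketch of the node-group-aware topology; the four-layer paths (/datacenter/rack/nodegroup plus hostname) are made up:

    NetworkTopologyWithNodeGroup topo = new NetworkTopologyWithNodeGroup();
    Node h1 = new NodeBase("h1", "/d1/r1/ng1");
    Node h2 = new NodeBase("h2", "/d1/r1/ng1");
    Node h3 = new NodeBase("h3", "/d1/r1/ng2");
    topo.add(h1);
    topo.add(h2);
    topo.add(h3);
    topo.isOnSameNodeGroup(h1, h2); // true: same hypervisor/node group
    topo.isOnSameNodeGroup(h1, h3); // false
    topo.isOnSameRack(h1, h3);      // true: the rack is the node group's parent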
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java
index c7738086fb..8c3c1b2d35 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenIdentifier.java
@@ -31,6 +31,8 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.TokenIdentifier;
+import com.google.common.annotations.VisibleForTesting;
+
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
@InterfaceStability.Evolving
public abstract class AbstractDelegationTokenIdentifier
@@ -173,16 +175,17 @@ public void readFields(DataInput in) throws IOException {
throw new IOException("Unknown version of delegation token " +
version);
}
- owner.readFields(in);
- renewer.readFields(in);
- realUser.readFields(in);
+ owner.readFields(in, Text.DEFAULT_MAX_LEN);
+ renewer.readFields(in, Text.DEFAULT_MAX_LEN);
+ realUser.readFields(in, Text.DEFAULT_MAX_LEN);
issueDate = WritableUtils.readVLong(in);
maxDate = WritableUtils.readVLong(in);
sequenceNumber = WritableUtils.readVInt(in);
masterKeyId = WritableUtils.readVInt(in);
}
- public void write(DataOutput out) throws IOException {
+ @VisibleForTesting
+ void writeImpl(DataOutput out) throws IOException {
out.writeByte(VERSION);
owner.write(out);
renewer.write(out);
@@ -193,6 +196,19 @@ public void write(DataOutput out) throws IOException {
WritableUtils.writeVInt(out, masterKeyId);
}
+ public void write(DataOutput out) throws IOException {
+ if (owner.getLength() > Text.DEFAULT_MAX_LEN) {
+ throw new IOException("owner is too long to be serialized!");
+ }
+ if (renewer.getLength() > Text.DEFAULT_MAX_LEN) {
+ throw new IOException("renewer is too long to be serialized!");
+ }
+ if (realUser.getLength() > Text.DEFAULT_MAX_LEN) {
+ throw new IOException("realuser is too long to be serialized!");
+ }
+ writeImpl(out);
+ }
+
public String toString() {
StringBuilder buffer = new StringBuilder();
buffer
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
index f907e3efeb..989c96a8e3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ShutdownHookManager.java
@@ -30,7 +30,7 @@
/**
* The ShutdownHookManager enables running shutdownHook
- * in a determistic order, higher priority first.
+ * in a deterministic order, higher priority first.
*
* The JVM runs ShutdownHooks in a non-deterministic order or in parallel.
* This class registers a single JVM shutdownHook and run all the
@@ -169,7 +169,7 @@ public boolean removeShutdownHook(Runnable shutdownHook) {
}
/**
- * Indicates if a shutdownHook is registered or nt.
+ * Indicates if a shutdownHook is registered or not.
*
* @param shutdownHook shutdownHook to check if registered.
* @return TRUE/FALSE depending if the shutdownHook is is registered.
@@ -177,5 +177,14 @@ public boolean removeShutdownHook(Runnable shutdownHook) {
public boolean hasShutdownHook(Runnable shutdownHook) {
return hooks.contains(new HookEntry(shutdownHook, 0));
}
+
+ /**
+ * Indicates if shutdown is in progress or not.
+ *
+ * @return TRUE if the shutdown is in progress, otherwise FALSE.
+ */
+ public boolean isShutdownInProgress() {
+ return shutdownInProgress.get();
+ }
}
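
A hedged sketch of how callers might use the new isShutdownInProgress() accessor together with the existing get() and addShutdownHook(Runnable, int) entry points; BackgroundCompactor, its priority, and its helper methods are illustrative, not part of this patch.

import org.apache.hadoop.util.ShutdownHookManager;

public class BackgroundCompactor implements Runnable {
  private static final int SHUTDOWN_PRIORITY = 10;  // illustrative priority

  public BackgroundCompactor() {
    ShutdownHookManager.get().addShutdownHook(new Runnable() {
      @Override
      public void run() {
        closeQuietly();          // runs in priority order during shutdown
      }
    }, SHUTDOWN_PRIORITY);
  }

  @Override
  public void run() {
    // don't start new work once the JVM is already going down
    if (ShutdownHookManager.get().isShutdownInProgress()) {
      return;
    }
    compactOnce();
  }

  private void compactOnce() { /* ... */ }

  private void closeQuietly() { /* ... */ }
}
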
diff --git a/hadoop-common-project/hadoop-common/src/main/native/.autom4te.cfg b/hadoop-common-project/hadoop-common/src/main/native/.autom4te.cfg
deleted file mode 100644
index a69c197883..0000000000
--- a/hadoop-common-project/hadoop-common/src/main/native/.autom4te.cfg
+++ /dev/null
@@ -1,42 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# autom4te configuration for hadoop-native library
-#
-
-begin-language: "Autoheader-preselections"
-args: --no-cache
-end-language: "Autoheader-preselections"
-
-begin-language: "Automake-preselections"
-args: --no-cache
-end-language: "Automake-preselections"
-
-begin-language: "Autoreconf-preselections"
-args: --no-cache
-end-language: "Autoreconf-preselections"
-
-begin-language: "Autoconf-without-aclocal-m4"
-args: --no-cache
-end-language: "Autoconf-without-aclocal-m4"
-
-begin-language: "Autoconf"
-args: --no-cache
-end-language: "Autoconf"
-
diff --git a/hadoop-common-project/hadoop-common/src/main/native/Makefile.am b/hadoop-common-project/hadoop-common/src/main/native/Makefile.am
deleted file mode 100644
index c4ca564c2b..0000000000
--- a/hadoop-common-project/hadoop-common/src/main/native/Makefile.am
+++ /dev/null
@@ -1,66 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# Notes:
-# 1. This makefile is designed to do the actual builds in $(HADOOP_PREFIX)/build/native/${os.name}-${os-arch}.
-# 2. This makefile depends on the following environment variables to function correctly:
-# * HADOOP_NATIVE_SRCDIR
-# * JAVA_HOME
-# * JVM_DATA_MODEL
-# * OS_NAME
-# * OS_ARCH
-# All these are setup by build.xml.
-#
-
-# Export $(PLATFORM) to prevent proliferation of sub-shells
-export PLATFORM = $(shell echo $$OS_NAME | tr [A-Z] [a-z])
-
-ACLOCAL_AMFLAGS = -I m4
-AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src \
- -I$(HADOOP_NATIVE_SRCDIR)/javah
-AM_LDFLAGS = @JNI_LDFLAGS@
-AM_CFLAGS = -g -Wall -fPIC -O2
-if SPECIFY_DATA_MODEL
-AM_LDFLAGS += -m$(JVM_DATA_MODEL)
-AM_CFLAGS += -m$(JVM_DATA_MODEL)
-endif
-
-lib_LTLIBRARIES = libhadoop.la
-libhadoop_la_SOURCES = src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c \
- src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c \
- src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c \
- src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c \
- src/org/apache/hadoop/io/compress/lz4/lz4.c \
- src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c \
- src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c \
- src/org/apache/hadoop/security/getGroup.c \
- src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c \
- src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c \
- src/org/apache/hadoop/io/nativeio/file_descriptor.c \
- src/org/apache/hadoop/io/nativeio/errno_enum.c \
- src/org/apache/hadoop/io/nativeio/NativeIO.c \
- src/org/apache/hadoop/util/NativeCrc32.c \
- src/org/apache/hadoop/util/bulk_crc32.c
-
-libhadoop_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS)
-libhadoop_la_LIBADD = -ldl -ljvm
-
-#
-#vim: sw=4: ts=4: noet
-#
diff --git a/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4 b/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4
deleted file mode 100644
index 93e05b8148..0000000000
--- a/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4
+++ /dev/null
@@ -1,28 +0,0 @@
-# AC_COMPUTE_NEEDED_DSO(LIBRARY, TEST_PROGRAM, PREPROC_SYMBOL)
-# --------------------------------------------------
-# Compute the 'actual' dynamic-library used
-# for LIBRARY and set it to PREPROC_SYMBOL
-AC_DEFUN([AC_COMPUTE_NEEDED_DSO],
-[
-AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_libname_$1,
- [
- echo '$2' > conftest.c
- if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -l$1 2>&1`"; then
- dnl Try objdump and ldd in that order to get the dynamic library
- if test ! -z "`which objdump | grep -v 'no objdump'`"; then
- ac_cv_libname_$1="`objdump -p conftest | grep NEEDED | grep $1 | sed 's/\W*NEEDED\W*\(.*\)\W*$/\"\1\"/'`"
- elif test ! -z "`which ldd | grep -v 'no ldd'`"; then
- ac_cv_libname_$1="`ldd conftest | grep $1 | sed 's/^[[[^A-Za-z0-9]]]*\([[[A-Za-z0-9\.]]]*\)[[[^A-Za-z0-9]]]*=>.*$/\"\1\"/'`"
- elif test ! -z "`which otool | grep -v 'no otool'`"; then
- ac_cv_libname_$1=\"`otool -L conftest | grep $1 | sed -e 's/^[ ]*//' -e 's/ .*//' -e 's/.*\/\(.*\)$/\1/'`\";
- else
- AC_MSG_ERROR(Can't find either 'objdump' or 'ldd' or 'otool' to compute the dynamic library for '-l$1')
- fi
- else
- ac_cv_libname_$1=libnotfound.so
- fi
- rm -f conftest*
- ]
-)
-AC_DEFINE_UNQUOTED($3, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1'])
-])# AC_COMPUTE_NEEDED_DSO
diff --git a/hadoop-common-project/hadoop-common/src/main/native/configure.ac b/hadoop-common-project/hadoop-common/src/main/native/configure.ac
deleted file mode 100644
index 34408d6418..0000000000
--- a/hadoop-common-project/hadoop-common/src/main/native/configure.ac
+++ /dev/null
@@ -1,130 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# configure.ac for hadoop native code.
-#
-
-# Notes:
-# 1. This configure.ac depends on the following environment variables to function correctly:
-# * HADOOP_NATIVE_SRCDIR
-# * JAVA_HOME
-# * JVM_DATA_MODEL
-# * OS_NAME
-# * OS_ARCH
-# All these are setup by build.xml.
-
-# -*- Autoconf -*-
-# Process this file with autoconf to produce a configure script.
-#
-
-AC_PREREQ(2.59)
-AC_INIT(src/org_apache_hadoop.h)
-AC_CONFIG_SRCDIR([src/org_apache_hadoop.h])
-AC_CONFIG_AUX_DIR([config])
-AC_CONFIG_MACRO_DIR([m4])
-AC_CONFIG_HEADER([config.h])
-AC_SYS_LARGEFILE
-AC_GNU_SOURCE
-
-AM_INIT_AUTOMAKE(hadoop,1.0.0)
-
-# Checks for programs.
-AC_PROG_CC
-AC_PROG_LIBTOOL
-
-# Checks for libraries.
-dnl Check for '-ldl'
-AC_CHECK_LIB([dl], [dlopen])
-
-dnl Check for '-ljvm'
-JNI_LDFLAGS=""
-if test $JAVA_HOME != ""
-then
- JNI_LDFLAGS="-L$JAVA_HOME/jre/lib/$OS_ARCH/server"
- JVMSOPATH=`find $JAVA_HOME/jre/ -name libjvm.so | head -n 1`
- JNI_LDFLAGS="$JNI_LDFLAGS -L`dirname $JVMSOPATH`"
-fi
-LDFLAGS="$LDFLAGS $JNI_LDFLAGS"
-AC_CHECK_LIB([jvm], [JNI_GetCreatedJavaVMs])
-AC_SUBST([JNI_LDFLAGS])
-
-# Checks for header files.
-dnl Check for Ansi C headers
-AC_HEADER_STDC
-
-dnl Check for other standard C headers
-AC_CHECK_HEADERS([stdio.h stddef.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
-
-dnl Check for JNI headers
-JNI_CPPFLAGS=""
-if test $JAVA_HOME != ""
-then
- for dir in `find $JAVA_HOME/include -follow -type d`
- do
- JNI_CPPFLAGS="$JNI_CPPFLAGS -I$dir"
- done
-fi
-cppflags_bak=$CPPFLAGS
-CPPFLAGS="$CPPFLAGS $JNI_CPPFLAGS"
-AC_CHECK_HEADERS([jni.h], [], AC_MSG_ERROR([Native java headers not found. Is \$JAVA_HOME set correctly?]))
-CPPFLAGS=$cppflags_bak
-AC_SUBST([JNI_CPPFLAGS])
-
-dnl Check for zlib headers
-AC_CHECK_HEADERS([zlib.h zconf.h],
- AC_COMPUTE_NEEDED_DSO(z,
- [#include "zlib.h"
- int main(int argc, char **argv){zlibVersion();return 0;}],
- HADOOP_ZLIB_LIBRARY),
- AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))
-
-dnl Check for snappy headers
-AC_CHECK_HEADERS([snappy-c.h],
- AC_COMPUTE_NEEDED_DSO(snappy,
- [#include "snappy-c.h"
- int main(int argc, char **argv){snappy_compress(0,0,0,0);return 0;}],
- HADOOP_SNAPPY_LIBRARY),
- AC_MSG_WARN(Snappy headers were not found... building without snappy.))
-
-dnl Check for headers needed by the native Group resolution implementation
-AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
-
-dnl check for posix_fadvise
-AC_CHECK_HEADERS(fcntl.h, [AC_CHECK_FUNCS(posix_fadvise)])
-
-dnl check for sync_file_range
-AC_CHECK_HEADERS(fcntl.h, [AC_CHECK_FUNCS(sync_file_range)])
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_C_CONST
-
-# Checks for library functions.
-AC_CHECK_FUNCS([memset])
-
-# Check for nonstandard STRERROR_R
-AC_FUNC_STRERROR_R
-
-AM_CONDITIONAL([SPECIFY_DATA_MODEL], [case $host_cpu in arm*) false;; *) true;; esac])
-
-AC_CONFIG_FILES([Makefile])
-AC_OUTPUT
-
-#
-#vim: sw=2: ts=2: noet
-#
diff --git a/hadoop-common-project/hadoop-common/src/main/native/lib/Makefile.am b/hadoop-common-project/hadoop-common/src/main/native/lib/Makefile.am
deleted file mode 100644
index 9b536ff440..0000000000
--- a/hadoop-common-project/hadoop-common/src/main/native/lib/Makefile.am
+++ /dev/null
@@ -1,47 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# Makefile template for building libhadoop.so
-#
-
-#
-# Notes:
-# 1. This makefile is designed to do the actual builds in $(HADOOP_PREFIX)/build/native/${os.name}-${os.arch}/lib
-# 2. This makefile depends on the following environment variables to function correctly:
-# * HADOOP_NATIVE_SRCDIR
-# * JAVA_HOME
-# * OS_ARCH
-# All these are setup by build.xml and/or the top-level makefile.
-#
-
-# Add .lo files in $(SUBDIRS) to construct libhadoop.so
-HADOOP_OBJS = $(foreach path,$(addprefix ../,$(SUBDIRS)),$(wildcard $(path)/*.lo))
-AM_LDFLAGS = @JNI_LDFLAGS@
-if SPECIFY_DATA_MODEL
-AM_LDFLAGS += -m$(JVM_DATA_MODEL)
-endif
-
-lib_LTLIBRARIES = libhadoop.la
-libhadoop_la_SOURCES =
-libhadoop_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS)
-libhadoop_la_LIBADD = $(HADOOP_OBJS) -ldl -ljvm
-
-#
-#vim: sw=4: ts=4: noet
-#
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
index d52a4f6b2a..641ecd73b7 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Compressor.c
@@ -16,10 +16,7 @@
* limitations under the License.
*/
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
+#include "config.h"
#include "org_apache_hadoop.h"
#include "org_apache_hadoop_io_compress_lz4_Lz4Compressor.h"
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
index 547b027cc1..3eebc1859d 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/lz4/Lz4Decompressor.c
@@ -16,10 +16,7 @@
* limitations under the License.
*/
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
+#include "config.h"
#include "org_apache_hadoop.h"
#include "org_apache_hadoop_io_compress_lz4_Lz4Decompressor.h"
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
index 13991c23f4..96a2402ae7 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c
@@ -16,36 +16,12 @@
* limitations under the License.
*/
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HADOOP_SNAPPY_LIBRARY
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
- #error 'stdio.h not found'
-#endif
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
- #error 'stdlib.h not found'
-#endif
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
- #error 'string.h not found'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
- #error 'dlfcn.h not found'
-#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <dlfcn.h>
+#include "config.h"
#include "org_apache_hadoop_io_compress_snappy.h"
#include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
@@ -123,5 +99,3 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
return (jint)compressed_direct_buf_len;
}
-
-#endif //define HADOOP_SNAPPY_LIBRARY
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
index 767c5f4b31..a5f07ca556 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c
@@ -16,36 +16,12 @@
* limitations under the License.
*/
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HADOOP_SNAPPY_LIBRARY
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
- #error 'stdio.h not found'
-#endif
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
- #error 'stdlib.h not found'
-#endif
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
- #error 'string.h not found'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
- #error 'dlfcn.h not found'
-#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <dlfcn.h>
+#include "config.h"
#include "org_apache_hadoop_io_compress_snappy.h"
#include "org_apache_hadoop_io_compress_snappy_SnappyDecompressor.h"
@@ -127,5 +103,3 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompres
return (jint)uncompressed_direct_buf_len;
}
-
-#endif //define HADOOP_SNAPPY_LIBRARY
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h
index 815e030673..3e99d5d20d 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h
@@ -17,42 +17,13 @@
*/
-#if !defined ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
+#ifndef ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
#define ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
-
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HADOOP_SNAPPY_LIBRARY
-
- #if defined HAVE_STDDEF_H
-    #include <stddef.h>
- #else
- #error 'stddef.h not found'
- #endif
-
- #if defined HAVE_SNAPPY_C_H
-    #include <snappy-c.h>
- #else
- #error 'Please install snappy-development packages for your platform.'
- #endif
-
- #if defined HAVE_DLFCN_H
-    #include <dlfcn.h>
- #else
- #error "dlfcn.h not found"
- #endif
-
- #if defined HAVE_JNI_H
-    #include <jni.h>
- #else
- #error 'jni.h not found'
- #endif
-
- #include "org_apache_hadoop.h"
-
-#endif //define HADOOP_SNAPPY_LIBRARY
+#include "org_apache_hadoop.h"
+#include <stddef.h>
+#include <snappy-c.h>
+#include <dlfcn.h>
+#include <jni.h>
#endif //ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am
deleted file mode 100644
index 821f33f052..0000000000
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/Makefile.am
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# Makefile template for building native 'zlib' for hadoop.
-#
-
-#
-# Notes:
-# 1. This makefile is designed to do the actual builds in $(HADOOP_PREFIX)/build/native/${os.name}-${os.arch}/$(subdir) .
-# 2. This makefile depends on the following environment variables to function correctly:
-# * HADOOP_NATIVE_SRCDIR
-# * JAVA_HOME
-# * JVM_DATA_MODEL
-# * OS_ARCH
-# * PLATFORM
-# All these are setup by build.xml and/or the top-level makefile.
-# 3. The creation of requisite jni headers/stubs are also done by build.xml and they are
-# assumed to be in $(HADOOP_PREFIX)/build/native/src/org/apache/hadoop/io/compress/zlib.
-#
-
-# The 'vpath directive' to locate the actual source files
-vpath %.c $(HADOOP_NATIVE_SRCDIR)/$(subdir)
-
-AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src
-AM_LDFLAGS = @JNI_LDFLAGS@
-AM_CFLAGS = -g -Wall -fPIC -O2
-if SPECIFY_DATA_MODEL
-AM_CFLAGS += -m$(JVM_DATA_MODEL)
-endif
-
-noinst_LTLIBRARIES = libnativezlib.la
-libnativezlib_la_SOURCES = ZlibCompressor.c ZlibDecompressor.c
-libnativezlib_la_LIBADD = -ldl -ljvm
-
-#
-#vim: sw=4: ts=4: noet
-#
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
index 9ada3f03b0..689c783ef7 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c
@@ -16,34 +16,12 @@
* limitations under the License.
*/
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
- #error 'stdio.h not found'
-#endif
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
- #error 'stdlib.h not found'
-#endif
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
- #error 'string.h not found'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
- #error 'dlfcn.h not found'
-#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <dlfcn.h>
+#include "config.h"
#include "org_apache_hadoop_io_compress_zlib.h"
#include "org_apache_hadoop_io_compress_zlib_ZlibCompressor.h"
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
index 3047dba267..6abe36381f 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c
@@ -16,34 +16,12 @@
* limitations under the License.
*/
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HAVE_STDIO_H
-  #include <stdio.h>
-#else
- #error 'stdio.h not found'
-#endif
-
-#if defined HAVE_STDLIB_H
-  #include <stdlib.h>
-#else
- #error 'stdlib.h not found'
-#endif
-
-#if defined HAVE_STRING_H
-  #include <string.h>
-#else
- #error 'string.h not found'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
- #error 'dlfcn.h not found'
-#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <dlfcn.h>
+#include "config.h"
#include "org_apache_hadoop_io_compress_zlib.h"
#include "org_apache_hadoop_io_compress_zlib_ZlibDecompressor.h"
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
index 16b607b4a9..c53aa531c9 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zlib/org_apache_hadoop_io_compress_zlib.h
@@ -19,40 +19,13 @@
#if !defined ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
#define ORG_APACHE_HADOOP_IO_COMPRESS_ZLIB_ZLIB_H
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HAVE_STDDEF_H
-  #include <stddef.h>
-#else
- #error 'stddef.h not found'
-#endif
-
-#if defined HAVE_ZLIB_H
-  #include <zlib.h>
-#else
- #error 'Please install zlib-development packages for your platform.'
-#endif
-
-#if defined HAVE_ZCONF_H
-  #include <zconf.h>
-#else
- #error 'Please install zlib-development packages for your platform.'
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
- #error "dlfcn.h not found"
-#endif
-
-#if defined HAVE_JNI_H
-  #include <jni.h>
-#else
- #error 'jni.h not found'
-#endif
+#include <stddef.h>
+#include <zlib.h>
+#include <zconf.h>
+#include <dlfcn.h>
+#include <jni.h>
+#include "config.h"
#include "org_apache_hadoop.h"
/* A helper macro to convert the java 'stream-handle' to a z_stream pointer. */
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
index fbcf9563ee..c08ea037d9 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/nativeio/NativeIO.c
@@ -16,9 +16,6 @@
* limitations under the License.
*/
-// get the autoconf settings
-#include "config.h"
-
#include
#include
#include
@@ -32,6 +29,7 @@
#include
#include
+#include "config.h"
#include "org_apache_hadoop.h"
#include "org_apache_hadoop_io_nativeio_NativeIO.h"
#include "file_descriptor.h"
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
index 869c2ba2e8..dd51c0a257 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCrc32.c
@@ -16,9 +16,6 @@
* limitations under the License.
*/
-// get the autoconf settings
-#include "config.h"
-
#include
#include
#include
@@ -26,6 +23,7 @@
#include
#include
+#include "config.h"
#include "org_apache_hadoop.h"
#include "org_apache_hadoop_util_NativeCrc32.h"
#include "gcc_optimizations.h"
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h b/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
index 7a777c2f4f..a50c41dbbb 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org_apache_hadoop.h
@@ -24,21 +24,10 @@
#if !defined ORG_APACHE_HADOOP_H
#define ORG_APACHE_HADOOP_H
-#if defined HAVE_CONFIG_H
-  #include <config.h>
-#endif
-
-#if defined HAVE_DLFCN_H
-  #include <dlfcn.h>
-#else
- #error "dlfcn.h not found"
-#endif
+#include <dlfcn.h>
+#include <jni.h>
-#if defined HAVE_JNI_H
-  #include <jni.h>
-#else
- #error 'jni.h not found'
-#endif
+#include "config.h"
/* A helper macro to 'throw' a java exception. */
#define THROW(env, exception_name, message) \
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
index d8c731ec5f..77693fb1ec 100644
--- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
@@ -65,7 +65,7 @@ export HADOOP_CLIENT_OPTS="-Xmx128m $HADOOP_CLIENT_OPTS"
export HADOOP_SECURE_DN_USER=${HADOOP_SECURE_DN_USER}
# Where log files are stored. $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
+#export HADOOP_LOG_DIR=${HADOOP_LOG_DIR}/$USER
# Where log files are stored in the secure data environment.
export HADOOP_SECURE_DN_LOG_DIR=${HADOOP_LOG_DIR}/${HADOOP_HDFS_USER}
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 1e72e362e7..c968ff2be6 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -599,10 +599,9 @@
-
-
+
-  <name>net.topology.node.switch.mapping.impl</name>
+  <name>net.topology.node.switch.mapping.impl</name>
  <value>org.apache.hadoop.net.ScriptBasedMapping</value>
  <description> The default implementation of the DNSToSwitchMapping. It
  invokes a script specified in net.topology.script.file.name to resolve
@@ -611,6 +610,13 @@
+<property>
+  <name>net.topology.impl</name>
+  <value>org.apache.hadoop.net.NetworkTopology</value>
+  <description>The default implementation of NetworkTopology, which is the classic three-layer topology.
+  </description>
+</property>
+
  <name>net.topology.script.file.name</name>
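
One way the new net.topology.impl key could be consumed is via Configuration.getClass() plus ReflectionUtils.newInstance(). This is a sketch only: the key name and default class come from the property above, but the factory wiring below is illustrative and not the patch's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.util.ReflectionUtils;

public class TopologyFactory {
  public static NetworkTopology newTopology(Configuration conf) {
    // falls back to the classic three-layer NetworkTopology if the key is unset
    Class<? extends NetworkTopology> clazz = conf.getClass(
        "net.topology.impl", NetworkTopology.class, NetworkTopology.class);
    return ReflectionUtils.newInstance(clazz, conf);
  }
}
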
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
index 34a1780142..4878031262 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java
@@ -663,6 +663,26 @@ public void testPattern() throws IOException {
conf.getPattern("test.pattern3", defaultPattern).pattern());
}
+ public void testPropertySource() throws IOException {
+ out = new BufferedWriter(new FileWriter(CONFIG));
+ startConfig();
+ appendProperty("test.foo", "bar");
+ endConfig();
+ Path fileResource = new Path(CONFIG);
+ conf.addResource(fileResource);
+ conf.set("fs.defaultFS", "value");
+ assertEquals(
+ "Resource string returned for a file-loaded property" +
+ " must be a proper absolute path",
+ fileResource,
+ new Path(conf.getPropertySource("test.foo")));
+ assertEquals("Resource string returned for a set() property must be null",
+ null,
+ conf.getPropertySource("fs.defaultFS"));
+ assertEquals("Resource string returned for an unset property must be null",
+ null, conf.getPropertySource("fs.defaultFoo"));
+ }
+
public void testSocketAddress() throws IOException {
Configuration conf = new Configuration();
final String defaultAddr = "host:1";
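
For reference, a sketch of the getPropertySource() behaviour the new test asserts: a key loaded from an added resource reports that resource's path, while a key applied through set() (or never defined) reports null. The file path and values below are illustrative and the resource file is assumed to exist and define test.foo.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;

public class ConfigProvenance {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.addResource(new Path("/tmp/my-site.xml"));   // hypothetical resource file
    conf.set("fs.defaultFS", "hdfs://namenode:8020");

    // A key read from the added file reports that file's path ...
    String fromFile = conf.getPropertySource("test.foo");
    // ... while a key applied with set(), or an unset key, reports null.
    String fromSet = conf.getPropertySource("fs.defaultFS");

    System.out.println(fromFile + " / " + fromSet);
  }
}
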
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
index 11f4d7af71..525f28bea7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemTestSetup.java
@@ -51,7 +51,19 @@ static public FileSystem setupForViewFileSystem(Configuration conf, FileSystem f
/**
* create the test root on local_fs - the mount table will point here
*/
- fsTarget.mkdirs(FileSystemTestHelper.getTestRootPath(fsTarget));
+ Path targetOfTests = FileSystemTestHelper.getTestRootPath(fsTarget);
+ // In case previous test was killed before cleanup
+ fsTarget.delete(targetOfTests, true);
+ fsTarget.mkdirs(targetOfTests);
+
+ // Setup a link from viewfs to targetfs for the first component of
+ // path of testdir.
+ String testDir = FileSystemTestHelper.getTestRootPath(fsTarget).toUri()
+ .getPath();
+ int indexOf2ndSlash = testDir.indexOf('/', 1);
+ String testDirFirstComponent = testDir.substring(0, indexOf2ndSlash);
+ ConfigUtil.addLink(conf, testDirFirstComponent, fsTarget.makeQualified(
+ new Path(testDirFirstComponent)).toUri());
// viewFs://home => fsTarget://home
String homeDirRoot = fsTarget.getHomeDirectory()
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
index d4f5057f7c..60c0703abc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
@@ -21,9 +21,13 @@
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
+import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
import org.junit.Test;
import org.mockito.Mockito;
@@ -32,7 +36,8 @@
* Test cases for IOUtils.java
*/
public class TestIOUtils {
-
+ private static final String TEST_FILE_NAME = "test_file";
+
@Test
public void testCopyBytesShouldCloseStreamsWhenCloseIsTrue() throws Exception {
InputStream inputStream = Mockito.mock(InputStream.class);
@@ -110,4 +115,41 @@ public void testCopyBytesWithCountShouldThrowOutTheStreamClosureExceptions()
Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
}
+ @Test
+ public void testWriteFully() throws IOException {
+ final int INPUT_BUFFER_LEN = 10000;
+ final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
+ byte[] input = new byte[INPUT_BUFFER_LEN];
+ for (int i = 0; i < input.length; i++) {
+ input[i] = (byte)(i & 0xff);
+ }
+ byte[] output = new byte[input.length];
+
+ try {
+ RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
+ FileChannel fc = raf.getChannel();
+ ByteBuffer buf = ByteBuffer.wrap(input);
+ IOUtils.writeFully(fc, buf);
+ raf.seek(0);
+ raf.read(output);
+ for (int i = 0; i < input.length; i++) {
+ assertEquals(input[i], output[i]);
+ }
+ buf.rewind();
+ IOUtils.writeFully(fc, buf, HALFWAY);
+ for (int i = 0; i < HALFWAY; i++) {
+ assertEquals(input[i], output[i]);
+ }
+ raf.seek(0);
+ raf.read(output);
+ for (int i = HALFWAY; i < input.length; i++) {
+ assertEquals(input[i - HALFWAY], output[i]);
+ }
+ } finally {
+ File f = new File(TEST_FILE_NAME);
+ if (f.exists()) {
+ f.delete();
+ }
+ }
+ }
}
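
A short sketch of the IOUtils.writeFully(FileChannel, ByteBuffer) overloads the test above exercises: unlike a bare FileChannel.write(), they keep writing until the whole buffer is drained, and the three-argument form writes starting at the given offset. The file name is illustrative.

import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

import org.apache.hadoop.io.IOUtils;

public class WriteFullyExample {
  public static void main(String[] args) throws Exception {
    RandomAccessFile raf = new RandomAccessFile("/tmp/writefully-demo", "rw");
    try {
      FileChannel fc = raf.getChannel();
      ByteBuffer buf = ByteBuffer.wrap("hello, world".getBytes("UTF-8"));
      IOUtils.writeFully(fc, buf);         // writes the whole buffer at the current position
      buf.rewind();
      IOUtils.writeFully(fc, buf, 4096);   // writes the whole buffer starting at offset 4096
    } finally {
      raf.close();
    }
  }
}
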
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
index 9bf83b906f..21da8c0dce 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
@@ -137,38 +137,38 @@ public void testIO() throws Exception {
}
}
- public void doTestLimitedIO(String str, int strLen) throws IOException {
+ public void doTestLimitedIO(String str, int len) throws IOException {
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
out.reset();
try {
- Text.writeString(out, str, strLen);
+ Text.writeString(out, str, len);
fail("expected writeString to fail when told to write a string " +
"that was too long! The string was '" + str + "'");
} catch (IOException e) {
}
- Text.writeString(out, str, strLen + 1);
+ Text.writeString(out, str, len + 1);
// test that it reads correctly
in.reset(out.getData(), out.getLength());
- in.mark(strLen);
+ in.mark(len);
String after;
try {
- after = Text.readString(in, strLen);
+ after = Text.readString(in, len);
fail("expected readString to fail when told to read a string " +
"that was too long! The string was '" + str + "'");
} catch (IOException e) {
}
in.reset();
- after = Text.readString(in, strLen + 1);
+ after = Text.readString(in, len + 1);
assertTrue(str.equals(after));
}
public void testLimitedIO() throws Exception {
- doTestLimitedIO("abcd", 4);
- doTestLimitedIO("", 0);
- doTestLimitedIO("1", 1);
+ doTestLimitedIO("abcd", 3);
+ doTestLimitedIO("foo bar baz", 10);
+ doTestLimitedIO("1", 0);
}
public void testCompare() throws Exception {
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index cc0c5c9f54..5d3d335b32 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -18,50 +18,55 @@
package org.apache.hadoop.ipc;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
+import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotSame;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
import java.io.Closeable;
import java.io.IOException;
-import java.net.ConnectException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
+import java.net.ConnectException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
import java.util.Arrays;
import javax.net.SocketFactory;
-import org.apache.commons.logging.*;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.Client.ConnectionId;
-import org.apache.hadoop.ipc.TestSaslRPC.TestSaslImpl;
-import org.apache.hadoop.ipc.TestSaslRPC.TestSaslProtocol;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.MockitoUtil;
import org.junit.Test;
-import static org.junit.Assert.*;
import com.google.protobuf.DescriptorProtos;
import com.google.protobuf.DescriptorProtos.EnumDescriptorProto;
-import static org.apache.hadoop.test.MetricsAsserts.*;
-
/** Unit tests for RPC. */
@SuppressWarnings("deprecation")
public class TestRPC {
@@ -250,7 +255,8 @@ public Object[] call(Method method, Object[][] params, InetSocketAddress[] addrs
@Override
    public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
- SocketFactory factory, int rpcTimeout) throws IOException {
+ SocketFactory factory, int rpcTimeout, RetryPolicy connectionRetryPolicy
+ ) throws IOException {
T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(),
new Class[] { protocol }, new StoppedInvocationHandler());
      return new ProtocolProxy<T>(protocol, proxy, false);
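
The hunk above threads a RetryPolicy into the test RpcEngine's getProxy(), matching the new connection retry support. As a point of reference, here is a minimal sketch of building such a policy and applying it at the proxy level with the existing RetryProxy API; EchoProtocol and its wrapper are made-up stand-ins, not part of the patch.

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

interface EchoProtocol {
  String echo(String msg) throws Exception;
}

public class RetryExample {
  public static EchoProtocol wrap(EchoProtocol direct) {
    // retry each failed call up to 3 times, sleeping 500 ms between attempts
    RetryPolicy policy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        3, 500, TimeUnit.MILLISECONDS);
    return (EchoProtocol) RetryProxy.create(EchoProtocol.class, direct, policy);
  }
}
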
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
index e2388ad550..91268109bd 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/TestDelegationToken.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.security.token.delegation;
import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
@@ -387,4 +388,46 @@ public void testDelegationTokenNullRenewer() throws Exception {
}
}
+ private boolean testDelegationTokenIdentiferSerializationRoundTrip(Text owner,
+ Text renewer, Text realUser) throws IOException {
+ TestDelegationTokenIdentifier dtid = new TestDelegationTokenIdentifier(
+ owner, renewer, realUser);
+ DataOutputBuffer out = new DataOutputBuffer();
+ dtid.writeImpl(out);
+ DataInputBuffer in = new DataInputBuffer();
+ in.reset(out.getData(), out.getLength());
+ try {
+ TestDelegationTokenIdentifier dtid2 =
+ new TestDelegationTokenIdentifier();
+ dtid2.readFields(in);
+ assertTrue(dtid.equals(dtid2));
+ return true;
+ } catch(IOException e){
+ return false;
+ }
+ }
+
+ @Test
+ public void testSimpleDtidSerialization() throws IOException {
+ assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+ new Text("owner"), new Text("renewer"), new Text("realUser")));
+ assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+ new Text(""), new Text(""), new Text("")));
+ assertTrue(testDelegationTokenIdentiferSerializationRoundTrip(
+ new Text(""), new Text("b"), new Text("")));
+ }
+
+ @Test
+ public void testOverlongDtidSerialization() throws IOException {
+ byte[] bigBuf = new byte[Text.DEFAULT_MAX_LEN + 1];
+ for (int i = 0; i < bigBuf.length; i++) {
+ bigBuf[i] = 0;
+ }
+ assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+ new Text(bigBuf), new Text("renewer"), new Text("realUser")));
+ assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+ new Text("owner"), new Text(bigBuf), new Text("realUser")));
+ assertFalse(testDelegationTokenIdentiferSerializationRoundTrip(
+ new Text("owner"), new Text("renewer"), new Text(bigBuf)));
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
index 37a561eaf7..717838251c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/JarFinder.java
@@ -15,15 +15,18 @@
import com.google.common.base.Preconditions;
+import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
+import java.io.OutputStream;
import java.net.URL;
import java.net.URLDecoder;
import java.text.MessageFormat;
import java.util.Enumeration;
+import java.util.jar.JarFile;
import java.util.jar.JarOutputStream;
import java.util.jar.Manifest;
import java.util.zip.ZipEntry;
@@ -37,10 +40,37 @@
*/
public class JarFinder {
- private static void zipDir(File dir, String relativePath, ZipOutputStream zos)
+ private static void copyToZipStream(InputStream is, ZipEntry entry,
+ ZipOutputStream zos) throws IOException {
+ zos.putNextEntry(entry);
+ byte[] arr = new byte[4096];
+ int read = is.read(arr);
+ while (read > -1) {
+ zos.write(arr, 0, read);
+ read = is.read(arr);
+ }
+ is.close();
+ zos.closeEntry();
+ }
+
+ public static void jarDir(File dir, String relativePath, ZipOutputStream zos)
throws IOException {
Preconditions.checkNotNull(relativePath, "relativePath");
Preconditions.checkNotNull(zos, "zos");
+
+ // by JAR spec, if there is a manifest, it must be the first entry in the
+ // ZIP.
+ File manifestFile = new File(dir, JarFile.MANIFEST_NAME);
+ ZipEntry manifestEntry = new ZipEntry(JarFile.MANIFEST_NAME);
+ if (!manifestFile.exists()) {
+ zos.putNextEntry(manifestEntry);
+ new Manifest().write(new BufferedOutputStream(zos));
+ zos.closeEntry();
+ } else {
+ InputStream is = new FileInputStream(manifestFile);
+ copyToZipStream(is, manifestEntry, zos);
+ }
+ zos.closeEntry();
zipDir(dir, relativePath, zos, true);
zos.close();
}
@@ -62,17 +92,12 @@ private static void zipDir(File dir, String relativePath, ZipOutputStream zos,
zipDir(file, relativePath + f.getName() + "/", zos, false);
}
else {
- ZipEntry anEntry = new ZipEntry(relativePath + f.getName());
- zos.putNextEntry(anEntry);
- InputStream is = new FileInputStream(f);
- byte[] arr = new byte[4096];
- int read = is.read(arr);
- while (read > -1) {
- zos.write(arr, 0, read);
- read = is.read(arr);
+ String path = relativePath + f.getName();
+ if (!path.equals(JarFile.MANIFEST_NAME)) {
+ ZipEntry anEntry = new ZipEntry(path);
+ InputStream is = new FileInputStream(f);
+ copyToZipStream(is, anEntry, zos);
}
- is.close();
- zos.closeEntry();
}
}
}
@@ -88,9 +113,8 @@ private static void createJar(File dir, File jarFile) throws IOException {
jarDir));
}
}
- JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile),
- new Manifest());
- zipDir(dir, "", zos);
+ JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile));
+ jarDir(dir, "", zos);
}
/**
@@ -142,5 +166,4 @@ else if ("file".equals(url.getProtocol())) {
}
return null;
}
-
}
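
A usage sketch for the now-public JarFinder.jarDir(): it writes META-INF/MANIFEST.MF as the first entry (creating an empty manifest if the directory has none) and closes the stream when done, so the result is readable by JarInputStream. The directory and jar paths below are illustrative.

import java.io.File;
import java.io.FileOutputStream;
import java.util.jar.JarOutputStream;

import org.apache.hadoop.util.JarFinder;

public class JarDirExample {
  public static void main(String[] args) throws Exception {
    File classesDir = new File("target/classes");            // hypothetical input directory
    JarOutputStream jos =
        new JarOutputStream(new FileOutputStream("target/demo.jar"));
    JarFinder.jarDir(classesDir, "", jos);                    // also closes the stream
  }
}
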
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java
index a311a9f712..4997b7a824 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestJarFinder.java
@@ -22,21 +22,105 @@
import org.junit.Assert;
import org.junit.Test;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileWriter;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.Writer;
+import java.text.MessageFormat;
+import java.util.Properties;
+import java.util.jar.JarInputStream;
+import java.util.jar.JarOutputStream;
+import java.util.jar.Manifest;
public class TestJarFinder {
@Test
- public void testAppend() throws Exception {
+ public void testJar() throws Exception {
//picking a class that is for sure in a JAR in the classpath
String jar = JarFinder.getJar(LogFactory.class);
Assert.assertTrue(new File(jar).exists());
+ }
+
+ private static void delete(File file) throws IOException {
+ if (file.getAbsolutePath().length() < 5) {
+ throw new IllegalArgumentException(
+ MessageFormat.format("Path [{0}] is too short, not deleting",
+ file.getAbsolutePath()));
+ }
+ if (file.exists()) {
+ if (file.isDirectory()) {
+ File[] children = file.listFiles();
+ if (children != null) {
+ for (File child : children) {
+ delete(child);
+ }
+ }
+ }
+ if (!file.delete()) {
+ throw new RuntimeException(
+ MessageFormat.format("Could not delete path [{0}]",
+ file.getAbsolutePath()));
+ }
+ }
+ }
+ @Test
+ public void testExpandedClasspath() throws Exception {
//picking a class that is for sure in a directory in the classpath
//in this case the JAR is created on the fly
- jar = JarFinder.getJar(TestJarFinder.class);
+ String jar = JarFinder.getJar(TestJarFinder.class);
Assert.assertTrue(new File(jar).exists());
}
+ @Test
+ public void testExistingManifest() throws Exception {
+ File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
+ TestJarFinder.class.getName() + "-testExistingManifest");
+ delete(dir);
+ dir.mkdirs();
+
+ File metaInfDir = new File(dir, "META-INF");
+ metaInfDir.mkdirs();
+ File manifestFile = new File(metaInfDir, "MANIFEST.MF");
+ Manifest manifest = new Manifest();
+ OutputStream os = new FileOutputStream(manifestFile);
+ manifest.write(os);
+ os.close();
+
+ File propsFile = new File(dir, "props.properties");
+ Writer writer = new FileWriter(propsFile);
+ new Properties().store(writer, "");
+ writer.close();
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ JarOutputStream zos = new JarOutputStream(baos);
+ JarFinder.jarDir(dir, "", zos);
+ JarInputStream jis =
+ new JarInputStream(new ByteArrayInputStream(baos.toByteArray()));
+ Assert.assertNotNull(jis.getManifest());
+ jis.close();
+ }
+
+ @Test
+ public void testNoManifest() throws Exception {
+ File dir = new File(System.getProperty("test.build.dir", "target/test-dir"),
+ TestJarFinder.class.getName() + "-testNoManifest");
+ delete(dir);
+ dir.mkdirs();
+ File propsFile = new File(dir, "props.properties");
+ Writer writer = new FileWriter(propsFile);
+ new Properties().store(writer, "");
+ writer.close();
+ ByteArrayOutputStream baos = new ByteArrayOutputStream();
+ JarOutputStream zos = new JarOutputStream(baos);
+ JarFinder.jarDir(dir, "", zos);
+ JarInputStream jis =
+ new JarInputStream(new ByteArrayInputStream(baos.toByteArray()));
+ Assert.assertNotNull(jis.getManifest());
+ jis.close();
+ }
}
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 7fc6e563db..442cdcac33 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -52,6 +52,11 @@
      <artifactId>hadoop-yarn-api</artifactId>
      <scope>provided</scope>
    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-raid</artifactId>
+      <scope>provided</scope>
+    </dependency>
@@ -120,6 +125,7 @@
run cp -r $ROOT/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version}/* .
run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* .
run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${project.version}/* .
+ run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-raid/target/hadoop-hdfs-raid-${project.version}/* .
run cp -r $ROOT/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version}/* .
run cp -r $ROOT/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${project.version}/* .
echo
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 4a1a205551..fa28ba31c2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -154,41 +154,33 @@ public static FILE_TYPE getType(FileStatus fileStatus) {
public static final int HTTP_TEMPORARY_REDIRECT = 307;
+ private static final String HTTP_GET = "GET";
+ private static final String HTTP_PUT = "PUT";
+ private static final String HTTP_POST = "POST";
+ private static final String HTTP_DELETE = "DELETE";
- /**
- * Get operations.
- */
- public enum GetOpValues {
- OPEN, GETFILESTATUS, LISTSTATUS, GETHOMEDIRECTORY, GETCONTENTSUMMARY, GETFILECHECKSUM,
- GETDELEGATIONTOKEN, GETFILEBLOCKLOCATIONS, INSTRUMENTATION
- }
+ public enum Operation {
+ OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
+ GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
+ GETFILECHECKSUM(HTTP_GET), GETFILEBLOCKLOCATIONS(HTTP_GET),
+ INSTRUMENTATION(HTTP_GET),
+ APPEND(HTTP_POST),
+ CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
+ SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
+ DELETE(HTTP_DELETE);
- /**
- * Post operations.
- */
- public static enum PostOpValues {
- APPEND
- }
+ private String httpMethod;
- /**
- * Put operations.
- */
- public static enum PutOpValues {
- CREATE, MKDIRS, RENAME, SETOWNER, SETPERMISSION, SETREPLICATION, SETTIMES,
- RENEWDELEGATIONTOKEN, CANCELDELEGATIONTOKEN
- }
+ Operation(String httpMethod) {
+ this.httpMethod = httpMethod;
+ }
+
+ public String getMethod() {
+ return httpMethod;
+ }
- /**
- * Delete operations.
- */
- public static enum DeleteOpValues {
- DELETE
}
- private static final String HTTP_GET = "GET";
- private static final String HTTP_PUT = "PUT";
- private static final String HTTP_POST = "POST";
- private static final String HTTP_DELETE = "DELETE";
private AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
private URI uri;
@@ -402,10 +394,12 @@ public boolean seekToNewSource(long targetPos) throws IOException {
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
    Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, GetOpValues.OPEN.toString());
- HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+ params.put(OP_PARAM, Operation.OPEN.toString());
+ HttpURLConnection conn = getConnection(Operation.OPEN.getMethod(), params,
+ f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
- return new FSDataInputStream(new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
+ return new FSDataInputStream(
+ new HttpFSDataInputStream(conn.getInputStream(), bufferSize));
}
/**
@@ -508,15 +502,18 @@ private FSDataOutputStream uploadData(String method, Path f, Map
* @see #setPermission(Path, FsPermission)
*/
@Override
- public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize,
- short replication, long blockSize, Progressable progress) throws IOException {
+ public FSDataOutputStream create(Path f, FsPermission permission,
+ boolean overwrite, int bufferSize,
+ short replication, long blockSize,
+ Progressable progress) throws IOException {
    Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, PutOpValues.CREATE.toString());
+ params.put(OP_PARAM, Operation.CREATE.toString());
params.put(OVERWRITE_PARAM, Boolean.toString(overwrite));
params.put(REPLICATION_PARAM, Short.toString(replication));
params.put(BLOCKSIZE_PARAM, Long.toString(blockSize));
params.put(PERMISSION_PARAM, permissionToString(permission));
- return uploadData(HTTP_PUT, f, params, bufferSize, HttpURLConnection.HTTP_CREATED);
+ return uploadData(Operation.CREATE.getMethod(), f, params, bufferSize,
+ HttpURLConnection.HTTP_CREATED);
}
@@ -532,10 +529,12 @@ public FSDataOutputStream create(Path f, FsPermission permission, boolean overwr
* @throws IOException
*/
@Override
- public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException {
+ public FSDataOutputStream append(Path f, int bufferSize,
+ Progressable progress) throws IOException {
    Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, PostOpValues.APPEND.toString());
- return uploadData(HTTP_POST, f, params, bufferSize, HttpURLConnection.HTTP_OK);
+ params.put(OP_PARAM, Operation.APPEND.toString());
+ return uploadData(Operation.APPEND.getMethod(), f, params, bufferSize,
+ HttpURLConnection.HTTP_OK);
}
/**
@@ -545,9 +544,10 @@ public FSDataOutputStream append(Path f, int bufferSize, Progressable progress)
@Override
public boolean rename(Path src, Path dst) throws IOException {
    Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, PutOpValues.RENAME.toString());
+ params.put(OP_PARAM, Operation.RENAME.toString());
params.put(DESTINATION_PARAM, dst.toString());
- HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true);
+ HttpURLConnection conn = getConnection(Operation.RENAME.getMethod(),
+ params, src, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
return (Boolean) json.get(RENAME_JSON);
@@ -580,9 +580,10 @@ public boolean delete(Path f) throws IOException {
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
    Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, DeleteOpValues.DELETE.toString());
+ params.put(OP_PARAM, Operation.DELETE.toString());
params.put(RECURSIVE_PARAM, Boolean.toString(recursive));
- HttpURLConnection conn = getConnection(HTTP_DELETE, params, f, true);
+ HttpURLConnection conn = getConnection(Operation.DELETE.getMethod(),
+ params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
return (Boolean) json.get(DELETE_JSON);
@@ -601,8 +602,9 @@ public boolean delete(Path f, boolean recursive) throws IOException {
@Override
public FileStatus[] listStatus(Path f) throws IOException {
    Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, GetOpValues.LISTSTATUS.toString());
- HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+ params.put(OP_PARAM, Operation.LISTSTATUS.toString());
+ HttpURLConnection conn = getConnection(Operation.LISTSTATUS.getMethod(),
+ params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
json = (JSONObject) json.get(FILE_STATUSES_JSON);
@@ -647,9 +649,10 @@ public Path getWorkingDirectory() {
@Override
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
    Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, PutOpValues.MKDIRS.toString());
+ params.put(OP_PARAM, Operation.MKDIRS.toString());
params.put(PERMISSION_PARAM, permissionToString(permission));
- HttpURLConnection conn = getConnection(HTTP_PUT, params, f, true);
+ HttpURLConnection conn = getConnection(Operation.MKDIRS.getMethod(),
+ params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
return (Boolean) json.get(MKDIRS_JSON);
@@ -668,8 +671,9 @@ public boolean mkdirs(Path f, FsPermission permission) throws IOException {
@Override
public FileStatus getFileStatus(Path f) throws IOException {
    Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, GetOpValues.GETFILESTATUS.toString());
- HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+ params.put(OP_PARAM, Operation.GETFILESTATUS.toString());
+ HttpURLConnection conn = getConnection(Operation.GETFILESTATUS.getMethod(),
+ params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
json = (JSONObject) json.get(FILE_STATUS_JSON);
@@ -684,9 +688,11 @@ public FileStatus getFileStatus(Path f) throws IOException {
@Override
public Path getHomeDirectory() {
Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, GetOpValues.GETHOMEDIRECTORY.toString());
+ params.put(OP_PARAM, Operation.GETHOMEDIRECTORY.toString());
try {
- HttpURLConnection conn = getConnection(HTTP_GET, params, new Path(getUri().toString(), "/"), false);
+ HttpURLConnection conn =
+ getConnection(Operation.GETHOMEDIRECTORY.getMethod(), params,
+ new Path(getUri().toString(), "/"), false);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
return new Path((String) json.get(HOME_DIR_JSON));
@@ -704,12 +710,14 @@ public Path getHomeDirectory() {
* @param groupname If it is null, the original groupname remains unchanged.
*/
@Override
- public void setOwner(Path p, String username, String groupname) throws IOException {
+ public void setOwner(Path p, String username, String groupname)
+ throws IOException {
Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, PutOpValues.SETOWNER.toString());
+ params.put(OP_PARAM, Operation.SETOWNER.toString());
params.put(OWNER_PARAM, username);
params.put(GROUP_PARAM, groupname);
- HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+ HttpURLConnection conn = getConnection(Operation.SETOWNER.getMethod(),
+ params, p, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
}
@@ -722,9 +730,9 @@ public void setOwner(Path p, String username, String groupname) throws IOExcepti
@Override
public void setPermission(Path p, FsPermission permission) throws IOException {
Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, PutOpValues.SETPERMISSION.toString());
+ params.put(OP_PARAM, Operation.SETPERMISSION.toString());
params.put(PERMISSION_PARAM, permissionToString(permission));
- HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+ HttpURLConnection conn = getConnection(Operation.SETPERMISSION.getMethod(), params, p, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
}
@@ -742,10 +750,11 @@ public void setPermission(Path p, FsPermission permission) throws IOException {
@Override
public void setTimes(Path p, long mtime, long atime) throws IOException {
Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, PutOpValues.SETTIMES.toString());
+ params.put(OP_PARAM, Operation.SETTIMES.toString());
params.put(MODIFICATION_TIME_PARAM, Long.toString(mtime));
params.put(ACCESS_TIME_PARAM, Long.toString(atime));
- HttpURLConnection conn = getConnection(HTTP_PUT, params, p, true);
+ HttpURLConnection conn = getConnection(Operation.SETTIMES.getMethod(),
+ params, p, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
}
@@ -761,11 +770,13 @@ public void setTimes(Path p, long mtime, long atime) throws IOException {
* @throws IOException
*/
@Override
- public boolean setReplication(Path src, short replication) throws IOException {
+ public boolean setReplication(Path src, short replication)
+ throws IOException {
Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, PutOpValues.SETREPLICATION.toString());
+ params.put(OP_PARAM, Operation.SETREPLICATION.toString());
params.put(REPLICATION_PARAM, Short.toString(replication));
- HttpURLConnection conn = getConnection(HTTP_PUT, params, src, true);
+ HttpURLConnection conn =
+ getConnection(Operation.SETREPLICATION.getMethod(), params, src, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
JSONObject json = (JSONObject) jsonParse(conn);
return (Boolean) json.get(SET_REPLICATION_JSON);
@@ -814,10 +825,12 @@ private FileStatus createFileStatus(Path parent, JSONObject json) {
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, GetOpValues.GETCONTENTSUMMARY.toString());
- HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+ params.put(OP_PARAM, Operation.GETCONTENTSUMMARY.toString());
+ HttpURLConnection conn =
+ getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
- JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
+ JSONObject json =
+ (JSONObject) ((JSONObject) jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
return new ContentSummary((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON),
(Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON),
(Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON),
@@ -830,10 +843,12 @@ public ContentSummary getContentSummary(Path f) throws IOException {
@Override
public FileChecksum getFileChecksum(Path f) throws IOException {
Map<String, String> params = new HashMap<String, String>();
- params.put(OP_PARAM, GetOpValues.GETFILECHECKSUM.toString());
- HttpURLConnection conn = getConnection(HTTP_GET, params, f, true);
+ params.put(OP_PARAM, Operation.GETFILECHECKSUM.toString());
+ HttpURLConnection conn =
+ getConnection(Operation.GETFILECHECKSUM.getMethod(), params, f, true);
validateResponse(conn, HttpURLConnection.HTTP_OK);
- final JSONObject json = (JSONObject) ((JSONObject) jsonParse(conn)).get(FILE_CHECKSUM_JSON);
+ final JSONObject json =
+ (JSONObject) ((JSONObject) jsonParse(conn)).get(FILE_CHECKSUM_JSON);
return new FileChecksum() {
@Override
public String getAlgorithmName() {
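For context on the client-side changes above: HttpFSFileSystem stays a regular Hadoop FileSystem, so callers are unaffected by the switch to the Operation enum. The following is a minimal, hypothetical usage sketch and not part of this patch; the URI scheme, host name, port 14000 and the assumption that the client configuration binds that scheme to HttpFSFileSystem (for example through the fs.<scheme>.impl mechanism) are all illustrative assumptions.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class HttpFSClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumption: this scheme is mapped to HttpFSFileSystem in the client
    // configuration and an HttpFS server is listening on port 14000.
    FileSystem fs = FileSystem.get(URI.create("http://httpfs-host:14000"), conf);
    Path src = new Path("/user/alice/a.txt");
    Path dst = new Path("/user/alice/b.txt");
    // Issues an HTTP PUT with op=RENAME and a destination parameter.
    boolean renamed = fs.rename(src, dst);
    // Issues an HTTP PUT with op=SETPERMISSION; 0644 is rw-r--r--.
    fs.setPermission(dst, new FsPermission((short) 0644));
    System.out.println("renamed: " + renamed);
    fs.close();
  }
}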
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
index 7e73666f58..abd382d871 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/CheckUploadContentTypeFilter.java
@@ -30,7 +30,6 @@
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
-import java.net.InetAddress;
import java.util.HashSet;
import java.util.Set;
@@ -43,8 +42,8 @@ public class CheckUploadContentTypeFilter implements Filter {
private static final Set<String> UPLOAD_OPERATIONS = new HashSet<String>();
static {
- UPLOAD_OPERATIONS.add(HttpFSFileSystem.PostOpValues.APPEND.toString());
- UPLOAD_OPERATIONS.add(HttpFSFileSystem.PutOpValues.CREATE.toString());
+ UPLOAD_OPERATIONS.add(HttpFSFileSystem.Operation.APPEND.toString());
+ UPLOAD_OPERATIONS.add(HttpFSFileSystem.Operation.CREATE.toString());
}
/**
@@ -82,7 +81,7 @@ public void doFilter(ServletRequest request, ServletResponse response,
if (method.equals("PUT") || method.equals("POST")) {
String op = httpReq.getParameter(HttpFSFileSystem.OP_PARAM);
if (op != null && UPLOAD_OPERATIONS.contains(op.toUpperCase())) {
- if ("true".equalsIgnoreCase(httpReq.getParameter(HttpFSParams.DataParam.NAME))) {
+ if ("true".equalsIgnoreCase(httpReq.getParameter(HttpFSParametersProvider.DataParam.NAME))) {
String contentType = httpReq.getContentType();
contentTypeOK =
HttpFSFileSystem.UPLOAD_CONTENT_TYPE.equalsIgnoreCase(contentType);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
index 26dff496dd..b999a72557 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSExceptionProvider.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.fs.http.server;
+import com.sun.jersey.api.container.ContainerException;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.wsrs.ExceptionProvider;
import org.slf4j.Logger;
@@ -59,6 +60,9 @@ public Response toResponse(Throwable throwable) {
if (throwable instanceof FileSystemAccessException) {
throwable = throwable.getCause();
}
+ if (throwable instanceof ContainerException) {
+ throwable = throwable.getCause();
+ }
if (throwable instanceof SecurityException) {
status = Response.Status.UNAUTHORIZED;
} else if (throwable instanceof FileNotFoundException) {
@@ -67,6 +71,8 @@ public Response toResponse(Throwable throwable) {
status = Response.Status.INTERNAL_SERVER_ERROR;
} else if (throwable instanceof UnsupportedOperationException) {
status = Response.Status.BAD_REQUEST;
+ } else if (throwable instanceof IllegalArgumentException) {
+ status = Response.Status.BAD_REQUEST;
} else {
status = Response.Status.INTERNAL_SERVER_ERROR;
}
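The two added branches above change how client errors surface: an IllegalArgumentException raised while parsing a request parameter (which Jersey may wrap in a ContainerException) should now produce a 400 response instead of a 500. The class below is a standalone sketch of that mapping, shown only for illustration; it is not code from this patch, it simply repeats the same instanceof checks.

import com.sun.jersey.api.container.ContainerException;

import javax.ws.rs.core.Response;

public class StatusMappingSketch {

  // Mirrors the unwrap-then-map logic added to HttpFSExceptionProvider.
  static Response.Status map(Throwable throwable) {
    if (throwable instanceof ContainerException) {
      throwable = throwable.getCause();
    }
    if (throwable instanceof IllegalArgumentException) {
      return Response.Status.BAD_REQUEST;
    }
    return Response.Status.INTERNAL_SERVER_ERROR;
  }

  public static void main(String[] args) {
    Throwable wrapped = new ContainerException(
        new IllegalArgumentException("invalid value for a query parameter"));
    // Expected output: 400
    System.out.println(map(wrapped).getStatusCode());
  }
}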
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
new file mode 100644
index 0000000000..0ab10179c8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParametersProvider.java
@@ -0,0 +1,398 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.http.server;
+
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
+import org.apache.hadoop.fs.http.client.HttpFSFileSystem.Operation;
+import org.apache.hadoop.lib.wsrs.BooleanParam;
+import org.apache.hadoop.lib.wsrs.EnumParam;
+import org.apache.hadoop.lib.wsrs.LongParam;
+import org.apache.hadoop.lib.wsrs.Param;
+import org.apache.hadoop.lib.wsrs.ParametersProvider;
+import org.apache.hadoop.lib.wsrs.ShortParam;
+import org.apache.hadoop.lib.wsrs.StringParam;
+import org.apache.hadoop.lib.wsrs.UserProvider;
+import org.slf4j.MDC;
+
+import javax.ws.rs.ext.Provider;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.regex.Pattern;
+
+/**
+ * HttpFS ParametersProvider.
+ */
+@Provider
+public class HttpFSParametersProvider extends ParametersProvider {
+
+ private static final Map<Enum, Class<Param<?>>[]> PARAMS_DEF =
+ new HashMap<Enum, Class<Param<?>>[]>();
+
+ static {
+ PARAMS_DEF.put(Operation.OPEN,
+ new Class[]{DoAsParam.class, OffsetParam.class, LenParam.class});
+ PARAMS_DEF.put(Operation.GETFILESTATUS, new Class[]{DoAsParam.class});
+ PARAMS_DEF.put(Operation.LISTSTATUS,
+ new Class[]{DoAsParam.class, FilterParam.class});
+ PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{DoAsParam.class});
+ PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{DoAsParam.class});
+ PARAMS_DEF.put(Operation.GETFILECHECKSUM, new Class[]{DoAsParam.class});
+ PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS,
+ new Class[]{DoAsParam.class});
+ PARAMS_DEF.put(Operation.INSTRUMENTATION, new Class[]{DoAsParam.class});
+ PARAMS_DEF.put(Operation.APPEND,
+ new Class[]{DoAsParam.class, DataParam.class});
+ PARAMS_DEF.put(Operation.CREATE,
+ new Class[]{DoAsParam.class, PermissionParam.class, OverwriteParam.class,
+ ReplicationParam.class, BlockSizeParam.class, DataParam.class});
+ PARAMS_DEF.put(Operation.MKDIRS,
+ new Class[]{DoAsParam.class, PermissionParam.class});
+ PARAMS_DEF.put(Operation.RENAME,
+ new Class[]{DoAsParam.class, DestinationParam.class});
+ PARAMS_DEF.put(Operation.SETOWNER,
+ new Class[]{DoAsParam.class, OwnerParam.class, GroupParam.class});
+ PARAMS_DEF.put(Operation.SETPERMISSION,
+ new Class[]{DoAsParam.class, PermissionParam.class});
+ PARAMS_DEF.put(Operation.SETREPLICATION,
+ new Class[]{DoAsParam.class, ReplicationParam.class});
+ PARAMS_DEF.put(Operation.SETTIMES,
+ new Class[]{DoAsParam.class, ModifiedTimeParam.class,
+ AccessTimeParam.class});
+ PARAMS_DEF.put(Operation.DELETE,
+ new Class[]{DoAsParam.class, RecursiveParam.class});
+ }
+
+ public HttpFSParametersProvider() {
+ super(HttpFSFileSystem.OP_PARAM, HttpFSFileSystem.Operation.class,
+ PARAMS_DEF);
+ }
+
+ /**
+ * Class for access-time parameter.
+ */
+ public static class AccessTimeParam extends LongParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.ACCESS_TIME_PARAM;
+ /**
+ * Constructor.
+ */
+ public AccessTimeParam() {
+ super(NAME, -1l);
+ }
+ }
+
+ /**
+ * Class for block-size parameter.
+ */
+ public static class BlockSizeParam extends LongParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.BLOCKSIZE_PARAM;
+
+ /**
+ * Constructor.
+ */
+ public BlockSizeParam() {
+ super(NAME, -1l);
+ }
+ }
+
+ /**
+ * Class for data parameter.
+ */
+ public static class DataParam extends BooleanParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = "data";
+
+ /**
+ * Constructor.
+ */
+ public DataParam() {
+ super(NAME, false);
+ }
+ }
+
+ /**
+ * Class for operation parameter.
+ */
+ public static class OperationParam extends EnumParam<HttpFSFileSystem.Operation> {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.OP_PARAM;
+ /**
+ * Constructor.
+ */
+ public OperationParam(String operation) {
+ super(NAME, HttpFSFileSystem.Operation.class,
+ HttpFSFileSystem.Operation.valueOf(operation.toUpperCase()));
+ }
+ }
+
+ /**
+ * Class for delete's recursive parameter.
+ */
+ public static class RecursiveParam extends BooleanParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.RECURSIVE_PARAM;
+
+ /**
+ * Constructor.
+ */
+ public RecursiveParam() {
+ super(NAME, false);
+ }
+ }
+
+ /**
+ * Class for do-as parameter.
+ */
+ public static class DoAsParam extends StringParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.DO_AS_PARAM;
+
+ /**
+ * Constructor.
+ */
+ public DoAsParam() {
+ super(NAME, null, UserProvider.USER_PATTERN);
+ }
+
+ /**
+ * Delegates to parent and then adds do-as user to
+ * MDC context for logging purposes.
+ *
+ *
+ * @param str parameter value.
+ *
+ * @return parsed parameter
+ */
+ @Override
+ public String parseParam(String str) {
+ String doAs = super.parseParam(str);
+ MDC.put(getName(), (doAs != null) ? doAs : "-");
+ return doAs;
+ }
+ }
+
+ /**
+ * Class for filter parameter.
+ */
+ public static class FilterParam extends StringParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = "filter";
+
+ /**
+ * Constructor.
+ */
+ public FilterParam() {
+ super(NAME, null);
+ }
+
+ }
+
+ /**
+ * Class for group parameter.
+ */
+ public static class GroupParam extends StringParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.GROUP_PARAM;
+
+ /**
+ * Constructor.
+ */
+ public GroupParam() {
+ super(NAME, null, UserProvider.USER_PATTERN);
+ }
+
+ }
+
+ /**
+ * Class for len parameter.
+ */
+ public static class LenParam extends LongParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = "len";
+
+ /**
+ * Constructor.
+ */
+ public LenParam() {
+ super(NAME, -1l);
+ }
+ }
+
+ /**
+ * Class for modified-time parameter.
+ */
+ public static class ModifiedTimeParam extends LongParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.MODIFICATION_TIME_PARAM;
+
+ /**
+ * Constructor.
+ */
+ public ModifiedTimeParam() {
+ super(NAME, -1l);
+ }
+ }
+
+ /**
+ * Class for offset parameter.
+ */
+ public static class OffsetParam extends LongParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = "offset";
+
+ /**
+ * Constructor.
+ */
+ public OffsetParam() {
+ super(NAME, 0l);
+ }
+ }
+
+ /**
+ * Class for overwrite parameter.
+ */
+ public static class OverwriteParam extends BooleanParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.OVERWRITE_PARAM;
+
+ /**
+ * Constructor.
+ */
+ public OverwriteParam() {
+ super(NAME, true);
+ }
+ }
+
+ /**
+ * Class for owner parameter.
+ */
+ public static class OwnerParam extends StringParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.OWNER_PARAM;
+
+ /**
+ * Constructor.
+ */
+ public OwnerParam() {
+ super(NAME, null, UserProvider.USER_PATTERN);
+ }
+
+ }
+
+ /**
+ * Class for permission parameter.
+ */
+ public static class PermissionParam extends StringParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.PERMISSION_PARAM;
+
+ /**
+ * Symbolic Unix permissions regular expression pattern.
+ */
+ private static final Pattern PERMISSION_PATTERN =
+ Pattern.compile(HttpFSFileSystem.DEFAULT_PERMISSION +
+ "|[0-1]?[0-7][0-7][0-7]");
+
+ /**
+ * Constructor.
+ */
+ public PermissionParam() {
+ super(NAME, HttpFSFileSystem.DEFAULT_PERMISSION, PERMISSION_PATTERN);
+ }
+
+ }
+
+ /**
+ * Class for replication parameter.
+ */
+ public static class ReplicationParam extends ShortParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.REPLICATION_PARAM;
+
+ /**
+ * Constructor.
+ */
+ public ReplicationParam() {
+ super(NAME, (short) -1);
+ }
+ }
+
+ /**
+ * Class for to-path parameter.
+ */
+ public static class DestinationParam extends StringParam {
+
+ /**
+ * Parameter name.
+ */
+ public static final String NAME = HttpFSFileSystem.DESTINATION_PARAM;
+
+ /**
+ * Constructor.
+ */
+ public DestinationParam() {
+ super(NAME, null);
+ }
+ }
+}
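The new provider centralizes the per-operation parameter definitions, so exposing an additional query parameter only requires a new Param subclass plus an entry in PARAMS_DEF. The sketch below is purely illustrative and not part of this patch: the parameter name "sources" and the operation it would be registered for are hypothetical; the constructor follows the same pattern as FilterParam above.

package org.apache.hadoop.fs.http.server;

import org.apache.hadoop.lib.wsrs.StringParam;

/**
 * Hypothetical parameter class, shown only to illustrate the extension pattern.
 */
public class SourcesParamSketch extends StringParam {

  /** Hypothetical parameter name; not defined anywhere in HttpFS. */
  public static final String NAME = "sources";

  public SourcesParamSketch() {
    super(NAME, null); // no default value, same pattern as FilterParam
  }

  // Registration would mirror the existing entries in the provider's static
  // block, e.g. (hypothetical operation, shown as a comment only):
  // PARAMS_DEF.put(Operation.CONCAT,
  //     new Class[]{DoAsParam.class, SourcesParamSketch.class});
}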
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java
deleted file mode 100644
index 3c7b5f7499..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSParams.java
+++ /dev/null
@@ -1,551 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.http.server;
-
-import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.lib.wsrs.BooleanParam;
-import org.apache.hadoop.lib.wsrs.EnumParam;
-import org.apache.hadoop.lib.wsrs.LongParam;
-import org.apache.hadoop.lib.wsrs.ShortParam;
-import org.apache.hadoop.lib.wsrs.StringParam;
-import org.apache.hadoop.lib.wsrs.UserProvider;
-import org.slf4j.MDC;
-
-import java.util.regex.Pattern;
-
-/**
- * HttpFS HTTP Parameters used by {@link HttpFSServer}.
- */
-public class HttpFSParams {
-
- /**
- * To avoid instantiation.
- */
- private HttpFSParams() {
- }
-
- /**
- * Class for access-time parameter.
- */
- public static class AccessTimeParam extends LongParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.ACCESS_TIME_PARAM;
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "-1";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public AccessTimeParam(String str) {
- super(NAME, str);
- }
- }
-
- /**
- * Class for block-size parameter.
- */
- public static class BlockSizeParam extends LongParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.BLOCKSIZE_PARAM;
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "-1";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public BlockSizeParam(String str) {
- super(NAME, str);
- }
- }
-
- /**
- * Class for data parameter.
- */
- public static class DataParam extends BooleanParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = "data";
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "false";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public DataParam(String str) {
- super(NAME, str);
- }
- }
-
- /**
- * Class for DELETE operation parameter.
- */
- public static class DeleteOpParam extends EnumParam<HttpFSFileSystem.DeleteOpValues> {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public DeleteOpParam(String str) {
- super(NAME, str, HttpFSFileSystem.DeleteOpValues.class);
- }
- }
-
- /**
- * Class for delete's recursive parameter.
- */
- public static class DeleteRecursiveParam extends BooleanParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.RECURSIVE_PARAM;
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "false";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public DeleteRecursiveParam(String str) {
- super(NAME, str);
- }
- }
-
- /**
- * Class for do-as parameter.
- */
- public static class DoAsParam extends StringParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.DO_AS_PARAM;
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public DoAsParam(String str) {
- super(NAME, str, UserProvider.USER_PATTERN);
- }
-
- /**
- * Delegates to parent and then adds do-as user to
- * MDC context for logging purposes.
- *
- * @param name parameter name.
- * @param str parameter value.
- *
- * @return parsed parameter
- */
- @Override
- public String parseParam(String name, String str) {
- String doAs = super.parseParam(name, str);
- MDC.put(NAME, (doAs != null) ? doAs : "-");
- return doAs;
- }
- }
-
- /**
- * Class for filter parameter.
- */
- public static class FilterParam extends StringParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = "filter";
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "";
-
- /**
- * Constructor.
- *
- * @param expr parameter value.
- */
- public FilterParam(String expr) {
- super(NAME, expr);
- }
-
- }
-
- /**
- * Class for path parameter.
- */
- public static class FsPathParam extends StringParam {
-
- /**
- * Constructor.
- *
- * @param path parameter value.
- */
- public FsPathParam(String path) {
- super("path", path);
- }
-
- /**
- * Makes the path absolute adding '/' to it.
- *
- * This is required because JAX-RS resolution of paths does not add
- * the root '/'.
- */
- public void makeAbsolute() {
- String path = value();
- path = "/" + ((path != null) ? path : "");
- setValue(path);
- }
-
- }
-
- /**
- * Class for GET operation parameter.
- */
- public static class GetOpParam extends EnumParam<HttpFSFileSystem.GetOpValues> {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public GetOpParam(String str) {
- super(NAME, str, HttpFSFileSystem.GetOpValues.class);
- }
- }
-
- /**
- * Class for group parameter.
- */
- public static class GroupParam extends StringParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.GROUP_PARAM;
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public GroupParam(String str) {
- super(NAME, str, UserProvider.USER_PATTERN);
- }
-
- }
-
- /**
- * Class for len parameter.
- */
- public static class LenParam extends LongParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = "len";
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "-1";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public LenParam(String str) {
- super(NAME, str);
- }
- }
-
- /**
- * Class for modified-time parameter.
- */
- public static class ModifiedTimeParam extends LongParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.MODIFICATION_TIME_PARAM;
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "-1";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public ModifiedTimeParam(String str) {
- super(NAME, str);
- }
- }
-
- /**
- * Class for offset parameter.
- */
- public static class OffsetParam extends LongParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = "offset";
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "0";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public OffsetParam(String str) {
- super(NAME, str);
- }
- }
-
- /**
- * Class for overwrite parameter.
- */
- public static class OverwriteParam extends BooleanParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.OVERWRITE_PARAM;
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "true";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public OverwriteParam(String str) {
- super(NAME, str);
- }
- }
-
- /**
- * Class for owner parameter.
- */
- public static class OwnerParam extends StringParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.OWNER_PARAM;
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public OwnerParam(String str) {
- super(NAME, str, UserProvider.USER_PATTERN);
- }
-
- }
-
- /**
- * Class for permission parameter.
- */
- public static class PermissionParam extends StringParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.PERMISSION_PARAM;
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = HttpFSFileSystem.DEFAULT_PERMISSION;
-
-
- /**
- * Symbolic Unix permissions regular expression pattern.
- */
- private static final Pattern PERMISSION_PATTERN =
- Pattern.compile(DEFAULT + "|[0-1]?[0-7][0-7][0-7]");
-
- /**
- * Constructor.
- *
- * @param permission parameter value.
- */
- public PermissionParam(String permission) {
- super(NAME, permission.toLowerCase(), PERMISSION_PATTERN);
- }
-
- }
-
- /**
- * Class for POST operation parameter.
- */
- public static class PostOpParam extends EnumParam<HttpFSFileSystem.PostOpValues> {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public PostOpParam(String str) {
- super(NAME, str, HttpFSFileSystem.PostOpValues.class);
- }
- }
-
- /**
- * Class for PUT operation parameter.
- */
- public static class PutOpParam extends EnumParam<HttpFSFileSystem.PutOpValues> {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.OP_PARAM;
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public PutOpParam(String str) {
- super(NAME, str, HttpFSFileSystem.PutOpValues.class);
- }
- }
-
- /**
- * Class for replication parameter.
- */
- public static class ReplicationParam extends ShortParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.REPLICATION_PARAM;
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "-1";
-
- /**
- * Constructor.
- *
- * @param str parameter value.
- */
- public ReplicationParam(String str) {
- super(NAME, str);
- }
- }
-
- /**
- * Class for to-path parameter.
- */
- public static class ToPathParam extends StringParam {
-
- /**
- * Parameter name.
- */
- public static final String NAME = HttpFSFileSystem.DESTINATION_PARAM;
-
- /**
- * Default parameter value.
- */
- public static final String DEFAULT = "";
-
- /**
- * Constructor.
- *
- * @param path parameter value.
- */
- public ToPathParam(String path) {
- super(NAME, path);
- }
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
index cf9048528b..22a173ac8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/HttpFSServer.java
@@ -21,26 +21,22 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
-import org.apache.hadoop.fs.http.server.HttpFSParams.AccessTimeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.BlockSizeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DataParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DeleteRecursiveParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.DoAsParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.FilterParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.FsPathParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.GetOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.GroupParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.LenParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ModifiedTimeParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OffsetParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OverwriteParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.OwnerParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PermissionParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PostOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.PutOpParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ReplicationParam;
-import org.apache.hadoop.fs.http.server.HttpFSParams.ToPathParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OperationParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.AccessTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.BlockSizeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DataParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.RecursiveParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DoAsParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.FilterParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.GroupParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.LenParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ModifiedTimeParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OffsetParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OverwriteParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.OwnerParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.PermissionParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.ReplicationParam;
+import org.apache.hadoop.fs.http.server.HttpFSParametersProvider.DestinationParam;
import org.apache.hadoop.lib.service.FileSystemAccess;
import org.apache.hadoop.lib.service.FileSystemAccessException;
import org.apache.hadoop.lib.service.Groups;
@@ -49,6 +45,7 @@
import org.apache.hadoop.lib.servlet.FileSystemReleaseFilter;
import org.apache.hadoop.lib.servlet.HostnameFilter;
import org.apache.hadoop.lib.wsrs.InputStreamEntity;
+import org.apache.hadoop.lib.wsrs.Parameters;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.json.simple.JSONObject;
import org.slf4j.Logger;
@@ -57,7 +54,6 @@
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
-import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
@@ -89,39 +85,6 @@
public class HttpFSServer {
private static Logger AUDIT_LOG = LoggerFactory.getLogger("httpfsaudit");
- /**
- * Special binding for '/' as it is not handled by the wildcard binding.
- *
- * @param user principal making the request.
- * @param op GET operation, default value is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}.
- * @param filter Glob filter, default value is none. Used only if the
- * operation is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#LISTSTATUS}
- * @param doAs user being impersonated, defualt value is none. It can be used
- * only if the current user is a HttpFSServer proxyuser.
- *
- * @return the request response
- *
- * @throws IOException thrown if an IO error occurred. Thrown exceptions are
- * handled by {@link HttpFSExceptionProvider}.
- * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
- * exceptions are handled by {@link HttpFSExceptionProvider}.
- */
- @GET
- @Path("/")
- @Produces(MediaType.APPLICATION_JSON)
- public Response root(@Context Principal user,
- @QueryParam(GetOpParam.NAME) GetOpParam op,
- @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
- @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
- throws IOException, FileSystemAccessException {
- return get(user, new FsPathParam(""), op, new OffsetParam(OffsetParam.DEFAULT),
- new LenParam(LenParam.DEFAULT), filter, doAs,
- new OverwriteParam(OverwriteParam.DEFAULT),
- new BlockSizeParam(BlockSizeParam.DEFAULT),
- new PermissionParam(PermissionParam.DEFAULT),
- new ReplicationParam(ReplicationParam.DEFAULT));
- }
-
/**
* Resolves the effective user that will be used to request a FileSystemAccess filesystem.
*
@@ -207,402 +170,405 @@ private FileSystem createFileSystem(Principal user, String doAs) throws IOExcept
return fs;
}
+ private void enforceRootPath(HttpFSFileSystem.Operation op, String path) {
+ if (!path.equals("/")) {
+ throw new UnsupportedOperationException(
+ MessageFormat.format("Operation [{0}], invalid path [{1}], must be '/'",
+ op, path));
+ }
+ }
+
/**
- * Binding to handle all GET requests, supported operations are
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues}.
- *
- * The @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#INSTRUMENTATION} operation is available only
- * to users that are in HttpFSServer's admin group (see {@link HttpFSServer}. It returns
- * HttpFSServer instrumentation data. The specified path must be '/'.
+ * Special binding for '/' as it is not handled by the wildcard binding.
*
- * @param user principal making the request.
- * @param path path for the GET request.
- * @param op GET operation, default value is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}.
- * @param offset of the file being fetch, used only with
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN} operations.
- * @param len amounts of bytes, used only with @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#OPEN}
- * operations.
- * @param filter Glob filter, default value is none. Used only if the
- * operation is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.GetOpValues#LISTSTATUS}
- * @param doAs user being impersonated, defualt value is none. It can be used
- * only if the current user is a HttpFSServer proxyuser.
- * @param override default is true. Used only for
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
- * @param blockSize block size to set, used only by
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
- * @param permission permission to set, used only by
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETPERMISSION}.
- * @param replication replication factor to set, used only by
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETREPLICATION}.
+ * @param user the principal of the user making the request.
+ * @param op the HttpFS operation of the request.
+ * @param params the HttpFS parameters of the request.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
- * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
- * exceptions are handled by {@link HttpFSExceptionProvider}.
+ * @throws FileSystemAccessException thrown if a FileSystemAccess related
+ * error occurred. Thrown exceptions are handled by
+ * {@link HttpFSExceptionProvider}.
+ */
+ @GET
+ @Path("/")
+ @Produces(MediaType.APPLICATION_JSON)
+ public Response getRoot(@Context Principal user,
+ @QueryParam(OperationParam.NAME) OperationParam op,
+ @Context Parameters params)
+ throws IOException, FileSystemAccessException {
+ return get(user, "", op, params);
+ }
+
+ private String makeAbsolute(String path) {
+ return "/" + ((path != null) ? path : "");
+ }
+
+ /**
+ * Binding to handle GET requests, supported operations are the HTTP GET
+ * operations of {@link HttpFSFileSystem.Operation}.
+ *
+ * @param user the principal of the user making the request.
+ * @param path the path for the operation.
+ * @param op the HttpFS operation of the request.
+ * @param params the HttpFS parameters of the request.
+ *
+ * @return the request response.
+ *
+ * @throws IOException thrown if an IO error occurred. Thrown exceptions are
+ * handled by {@link HttpFSExceptionProvider}.
+ * @throws FileSystemAccessException thrown if a FileSystemAccess related
+ * error occurred. Thrown exceptions are handled by
+ * {@link HttpFSExceptionProvider}.
*/
@GET
@Path("{path:.*}")
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
public Response get(@Context Principal user,
- @PathParam("path") @DefaultValue("") FsPathParam path,
- @QueryParam(GetOpParam.NAME) GetOpParam op,
- @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT) OffsetParam offset,
- @QueryParam(LenParam.NAME) @DefaultValue(LenParam.DEFAULT) LenParam len,
- @QueryParam(FilterParam.NAME) @DefaultValue(FilterParam.DEFAULT) FilterParam filter,
- @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs,
-
- //these params are only for createHandle operation acceptance purposes
- @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
- @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
- @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
- PermissionParam permission,
- @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
- ReplicationParam replication
- )
+ @PathParam("path") String path,
+ @QueryParam(OperationParam.NAME) OperationParam op,
+ @Context Parameters params)
throws IOException, FileSystemAccessException {
- Response response = null;
- if (op == null) {
- throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", GetOpParam.NAME));
- } else {
- path.makeAbsolute();
- MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
- switch (op.value()) {
- case OPEN: {
- //Invoking the command directly using an unmanaged FileSystem that is released by the
- //FileSystemReleaseFilter
- FSOperations.FSOpen command = new FSOperations.FSOpen(path.value());
- FileSystem fs = createFileSystem(user, doAs.value());
- InputStream is = command.execute(fs);
- AUDIT_LOG.info("[{}] offset [{}] len [{}]", new Object[]{path, offset, len});
- InputStreamEntity entity = new InputStreamEntity(is, offset.value(), len.value());
- response = Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
- break;
- }
- case GETFILESTATUS: {
- FSOperations.FSFileStatus command = new FSOperations.FSFileStatus(path.value());
- Map json = fsExecute(user, doAs.value(), command);
- AUDIT_LOG.info("[{}]", path);
- response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
- break;
- }
- case LISTSTATUS: {
- FSOperations.FSListStatus command = new FSOperations.FSListStatus(path.value(), filter.value());
- Map json = fsExecute(user, doAs.value(), command);
- if (filter.value() == null) {
- AUDIT_LOG.info("[{}]", path);
- } else {
- AUDIT_LOG.info("[{}] filter [{}]", path, filter.value());
- }
- response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
- break;
- }
- case GETHOMEDIRECTORY: {
- FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
- JSONObject json = fsExecute(user, doAs.value(), command);
- AUDIT_LOG.info("");
- response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
- break;
- }
- case INSTRUMENTATION: {
- if (!path.value().equals("/")) {
- throw new UnsupportedOperationException(
- MessageFormat.format("Invalid path for {0}={1}, must be '/'",
- GetOpParam.NAME, HttpFSFileSystem.GetOpValues.INSTRUMENTATION));
- }
- Groups groups = HttpFSServerWebApp.get().get(Groups.class);
- List<String> userGroups = groups.getGroups(user.getName());
- if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
- throw new AccessControlException("User not in HttpFSServer admin group");
- }
- Instrumentation instrumentation = HttpFSServerWebApp.get().get(Instrumentation.class);
- Map snapshot = instrumentation.getSnapshot();
- response = Response.ok(snapshot).build();
- break;
- }
- case GETCONTENTSUMMARY: {
- FSOperations.FSContentSummary command = new FSOperations.FSContentSummary(path.value());
- Map json = fsExecute(user, doAs.value(), command);
- AUDIT_LOG.info("[{}]", path);
- response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
- break;
- }
- case GETFILECHECKSUM: {
- FSOperations.FSFileChecksum command = new FSOperations.FSFileChecksum(path.value());
- Map json = fsExecute(user, doAs.value(), command);
- AUDIT_LOG.info("[{}]", path);
- response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
- break;
- }
- case GETDELEGATIONTOKEN: {
- response = Response.status(Response.Status.BAD_REQUEST).build();
- break;
- }
- case GETFILEBLOCKLOCATIONS: {
- response = Response.status(Response.Status.BAD_REQUEST).build();
- break;
+ Response response;
+ path = makeAbsolute(path);
+ MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+ String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
+ switch (op.value()) {
+ case OPEN: {
+ //Invoking the command directly using an unmanaged FileSystem that is
+ // released by the FileSystemReleaseFilter
+ FSOperations.FSOpen command = new FSOperations.FSOpen(path);
+ FileSystem fs = createFileSystem(user, doAs);
+ InputStream is = command.execute(fs);
+ Long offset = params.get(OffsetParam.NAME, OffsetParam.class);
+ Long len = params.get(LenParam.NAME, LenParam.class);
+ AUDIT_LOG.info("[{}] offset [{}] len [{}]",
+ new Object[]{path, offset, len});
+ InputStreamEntity entity = new InputStreamEntity(is, offset, len);
+ response =
+ Response.ok(entity).type(MediaType.APPLICATION_OCTET_STREAM).build();
+ break;
+ }
+ case GETFILESTATUS: {
+ FSOperations.FSFileStatus command =
+ new FSOperations.FSFileStatus(path);
+ Map json = fsExecute(user, doAs, command);
+ AUDIT_LOG.info("[{}]", path);
+ response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+ break;
+ }
+ case LISTSTATUS: {
+ String filter = params.get(FilterParam.NAME, FilterParam.class);
+ FSOperations.FSListStatus command = new FSOperations.FSListStatus(
+ path, filter);
+ Map json = fsExecute(user, doAs, command);
+ AUDIT_LOG.info("[{}] filter [{}]", path,
+ (filter != null) ? filter : "-");
+ response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+ break;
+ }
+ case GETHOMEDIRECTORY: {
+ enforceRootPath(op.value(), path);
+ FSOperations.FSHomeDir command = new FSOperations.FSHomeDir();
+ JSONObject json = fsExecute(user, doAs, command);
+ AUDIT_LOG.info("");
+ response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+ break;
+ }
+ case INSTRUMENTATION: {
+ enforceRootPath(op.value(), path);
+ Groups groups = HttpFSServerWebApp.get().get(Groups.class);
+ List<String> userGroups = groups.getGroups(user.getName());
+ if (!userGroups.contains(HttpFSServerWebApp.get().getAdminGroup())) {
+ throw new AccessControlException(
+ "User not in HttpFSServer admin group");
}
+ Instrumentation instrumentation =
+ HttpFSServerWebApp.get().get(Instrumentation.class);
+ Map snapshot = instrumentation.getSnapshot();
+ response = Response.ok(snapshot).build();
+ break;
+ }
+ case GETCONTENTSUMMARY: {
+ FSOperations.FSContentSummary command =
+ new FSOperations.FSContentSummary(path);
+ Map json = fsExecute(user, doAs, command);
+ AUDIT_LOG.info("[{}]", path);
+ response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+ break;
+ }
+ case GETFILECHECKSUM: {
+ FSOperations.FSFileChecksum command =
+ new FSOperations.FSFileChecksum(path);
+ Map json = fsExecute(user, doAs, command);
+ AUDIT_LOG.info("[{}]", path);
+ response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
+ break;
+ }
+ case GETFILEBLOCKLOCATIONS: {
+ response = Response.status(Response.Status.BAD_REQUEST).build();
+ break;
+ }
+ default: {
+ throw new IOException(
+ MessageFormat.format("Invalid HTTP GET operation [{0}]",
+ op.value()));
}
- return response;
}
+ return response;
}
- /**
- * Creates the URL for an upload operation (create or append).
- *
- * @param uriInfo uri info of the request.
- * @param uploadOperation operation for the upload URL.
- *
- * @return the URI for uploading data.
- */
- protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum<?> uploadOperation) {
- UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
- uriBuilder = uriBuilder.replaceQueryParam(PutOpParam.NAME, uploadOperation).
- queryParam(DataParam.NAME, Boolean.TRUE);
- return uriBuilder.build(null);
- }
/**
- * Binding to handle all DELETE requests.
+ * Binding to handle DELETE requests.
*
- * @param user principal making the request.
- * @param path path for the DELETE request.
- * @param op DELETE operation, default value is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.DeleteOpValues#DELETE}.
- * @param recursive indicates if the delete is recursive, default is false
- * @param doAs user being impersonated, defualt value is none. It can be used
- * only if the current user is a HttpFSServer proxyuser.
+ * @param user the principal of the user making the request.
+ * @param path the path for the operation.
+ * @param op the HttpFS operation of the request.
+ * @param params the HttpFS parameters of the request.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
- * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
- * exceptions are handled by {@link HttpFSExceptionProvider}.
+ * @throws FileSystemAccessException thrown if a FileSystemAccess related
+ * error occurred. Thrown exceptions are handled by
+ * {@link HttpFSExceptionProvider}.
*/
@DELETE
@Path("{path:.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response delete(@Context Principal user,
- @PathParam("path") FsPathParam path,
- @QueryParam(DeleteOpParam.NAME) DeleteOpParam op,
- @QueryParam(DeleteRecursiveParam.NAME) @DefaultValue(DeleteRecursiveParam.DEFAULT)
- DeleteRecursiveParam recursive,
- @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
+ @PathParam("path") String path,
+ @QueryParam(OperationParam.NAME) OperationParam op,
+ @Context Parameters params)
throws IOException, FileSystemAccessException {
- Response response = null;
- if (op == null) {
- throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", DeleteOpParam.NAME));
- }
+ Response response;
+ path = makeAbsolute(path);
+ MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+ String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
switch (op.value()) {
case DELETE: {
- path.makeAbsolute();
- MDC.put(HttpFSFileSystem.OP_PARAM, "DELETE");
+ Boolean recursive =
+ params.get(RecursiveParam.NAME, RecursiveParam.class);
AUDIT_LOG.info("[{}] recursive [{}]", path, recursive);
- FSOperations.FSDelete command = new FSOperations.FSDelete(path.value(), recursive.value());
- JSONObject json = fsExecute(user, doAs.value(), command);
+ FSOperations.FSDelete command =
+ new FSOperations.FSDelete(path, recursive);
+ JSONObject json = fsExecute(user, doAs, command);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
+ default: {
+ throw new IOException(
+ MessageFormat.format("Invalid HTTP DELETE operation [{0}]",
+ op.value()));
+ }
}
return response;
}
+ /**
+ * Binding to handle POST requests.
+ *
+ * @param is the inputstream for the request payload.
+ * @param user the principal of the user making the request.
+ * @param uriInfo the UriInfo of the request.
+ * @param path the path for the operation.
+ * @param op the HttpFS operation of the request.
+ * @param params the HttpFS parameters of the request.
+ *
+ * @return the request response.
+ *
+ * @throws IOException thrown if an IO error occurred. Thrown exceptions are
+ * handled by {@link HttpFSExceptionProvider}.
+ * @throws FileSystemAccessException thrown if a FileSystemAccess related
+ * error occurred. Thrown exceptions are handled by
+ * {@link HttpFSExceptionProvider}.
+ */
+ @POST
+ @Path("{path:.*}")
+ @Consumes({"*/*"})
+ @Produces({MediaType.APPLICATION_JSON})
+ public Response post(InputStream is,
+ @Context Principal user,
+ @Context UriInfo uriInfo,
+ @PathParam("path") String path,
+ @QueryParam(OperationParam.NAME) OperationParam op,
+ @Context Parameters params)
+ throws IOException, FileSystemAccessException {
+ Response response;
+ path = makeAbsolute(path);
+ MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+ String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
+ switch (op.value()) {
+ case APPEND: {
+ boolean hasData = params.get(DataParam.NAME, DataParam.class);
+ if (!hasData) {
+ response = Response.temporaryRedirect(
+ createUploadRedirectionURL(uriInfo,
+ HttpFSFileSystem.Operation.APPEND)).build();
+ } else {
+ FSOperations.FSAppend command =
+ new FSOperations.FSAppend(is, path);
+ fsExecute(user, doAs, command);
+ AUDIT_LOG.info("[{}]", path);
+ response = Response.ok().type(MediaType.APPLICATION_JSON).build();
+ }
+ break;
+ }
+ default: {
+ throw new IOException(
+ MessageFormat.format("Invalid HTTP POST operation [{0}]",
+ op.value()));
+ }
+ }
+ return response;
+ }
/**
- * Binding to handle all PUT requests, supported operations are
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues}.
+ * Creates the URL for an upload operation (create or append).
*
- * @param is request input stream, used only for
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND} operations.
- * @param user principal making the request.
- * @param uriInfo the request uriInfo.
- * @param path path for the PUT request.
- * @param op PUT operation, no default value.
- * @param toPath new path, used only for
- * {@link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#RENAME} operations.
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETTIMES}.
- * @param owner owner to set, used only for
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETOWNER} operations.
- * @param group group to set, used only for
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETOWNER} operations.
- * @param override default is true. Used only for
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
- * @param blockSize block size to set, used only by
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#CREATE} operations.
- * @param permission permission to set, used only by
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETPERMISSION}.
- * @param replication replication factor to set, used only by
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETREPLICATION}.
- * @param modifiedTime modified time, in seconds since EPOC, used only by
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETTIMES}.
- * @param accessTime accessed time, in seconds since EPOC, used only by
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PutOpValues#SETTIMES}.
- * @param hasData indicates if the append request is uploading data or not
- * (just getting the handle).
- * @param doAs user being impersonated, defualt value is none. It can be used
- * only if the current user is a HttpFSServer proxyuser.
+ * @param uriInfo uri info of the request.
+ * @param uploadOperation operation for the upload URL.
+ *
+ * @return the URI for uploading data.
+ */
+ protected URI createUploadRedirectionURL(UriInfo uriInfo, Enum<?> uploadOperation) {
+ UriBuilder uriBuilder = uriInfo.getRequestUriBuilder();
+ uriBuilder = uriBuilder.replaceQueryParam(OperationParam.NAME, uploadOperation).
+ queryParam(DataParam.NAME, Boolean.TRUE);
+ return uriBuilder.build(null);
+ }
+
+
+ /**
+ * Binding to handle PUT requests.
+ *
+ * @param is the inputstream for the request payload.
+ * @param user the principal of the user making the request.
+ * @param uriInfo the UriInfo of the request.
+ * @param path the path for the operation.
+ * @param op the HttpFS operation of the request.
+ * @param params the HttpFS parameters of the request.
*
* @return the request response.
*
* @throws IOException thrown if an IO error occurred. Thrown exceptions are
* handled by {@link HttpFSExceptionProvider}.
- * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
- * exceptions are handled by {@link HttpFSExceptionProvider}.
+ * @throws FileSystemAccessException thrown if a FileSystemAccess related
+ * error occurred. Thrown exceptions are handled by
+ * {@link HttpFSExceptionProvider}.
*/
@PUT
@Path("{path:.*}")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_JSON})
public Response put(InputStream is,
- @Context Principal user,
- @Context UriInfo uriInfo,
- @PathParam("path") FsPathParam path,
- @QueryParam(PutOpParam.NAME) PutOpParam op,
- @QueryParam(ToPathParam.NAME) @DefaultValue(ToPathParam.DEFAULT) ToPathParam toPath,
- @QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT) OwnerParam owner,
- @QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT) GroupParam group,
- @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT) OverwriteParam override,
- @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT) BlockSizeParam blockSize,
- @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
- PermissionParam permission,
- @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
- ReplicationParam replication,
- @QueryParam(ModifiedTimeParam.NAME) @DefaultValue(ModifiedTimeParam.DEFAULT)
- ModifiedTimeParam modifiedTime,
- @QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
- AccessTimeParam accessTime,
- @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
- @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
+ @Context Principal user,
+ @Context UriInfo uriInfo,
+ @PathParam("path") String path,
+ @QueryParam(OperationParam.NAME) OperationParam op,
+ @Context Parameters params)
throws IOException, FileSystemAccessException {
- Response response = null;
- if (op == null) {
- throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PutOpParam.NAME));
- }
- path.makeAbsolute();
+ Response response;
+ path = makeAbsolute(path);
MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
+ String doAs = params.get(DoAsParam.NAME, DoAsParam.class);
switch (op.value()) {
case CREATE: {
- if (!hasData.value()) {
+ boolean hasData = params.get(DataParam.NAME, DataParam.class);
+ if (!hasData) {
response = Response.temporaryRedirect(
- createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PutOpValues.CREATE)).build();
+ createUploadRedirectionURL(uriInfo,
+ HttpFSFileSystem.Operation.CREATE)).build();
} else {
- FSOperations.FSCreate
- command = new FSOperations.FSCreate(is, path.value(), permission.value(), override.value(),
- replication.value(), blockSize.value());
- fsExecute(user, doAs.value(), command);
- AUDIT_LOG.info("[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
- new Object[]{path, permission, override, replication, blockSize});
+ String permission = params.get(PermissionParam.NAME,
+ PermissionParam.class);
+ boolean override = params.get(OverwriteParam.NAME,
+ OverwriteParam.class);
+ short replication = params.get(ReplicationParam.NAME,
+ ReplicationParam.class);
+ long blockSize = params.get(BlockSizeParam.NAME,
+ BlockSizeParam.class);
+ FSOperations.FSCreate command =
+ new FSOperations.FSCreate(is, path, permission, override,
+ replication, blockSize);
+ fsExecute(user, doAs, command);
+ AUDIT_LOG.info(
+ "[{}] permission [{}] override [{}] replication [{}] blockSize [{}]",
+ new Object[]{path, permission, override, replication, blockSize});
response = Response.status(Response.Status.CREATED).build();
}
break;
}
case MKDIRS: {
- FSOperations.FSMkdirs command = new FSOperations.FSMkdirs(path.value(), permission.value());
- JSONObject json = fsExecute(user, doAs.value(), command);
- AUDIT_LOG.info("[{}] permission [{}]", path, permission.value());
+ String permission = params.get(PermissionParam.NAME,
+ PermissionParam.class);
+ FSOperations.FSMkdirs command =
+ new FSOperations.FSMkdirs(path, permission);
+ JSONObject json = fsExecute(user, doAs, command);
+ AUDIT_LOG.info("[{}] permission [{}]", path, permission);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case RENAME: {
- FSOperations.FSRename command = new FSOperations.FSRename(path.value(), toPath.value());
- JSONObject json = fsExecute(user, doAs.value(), command);
+ String toPath = params.get(DestinationParam.NAME, DestinationParam.class);
+ FSOperations.FSRename command =
+ new FSOperations.FSRename(path, toPath);
+ JSONObject json = fsExecute(user, doAs, command);
AUDIT_LOG.info("[{}] to [{}]", path, toPath);
response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
break;
}
case SETOWNER: {
- FSOperations.FSSetOwner command = new FSOperations.FSSetOwner(path.value(), owner.value(), group.value());
- fsExecute(user, doAs.value(), command);
- AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner.value() + ":" + group.value());
+ String owner = params.get(OwnerParam.NAME, OwnerParam.class);
+ String group = params.get(GroupParam.NAME, GroupParam.class);
+ FSOperations.FSSetOwner command =
+ new FSOperations.FSSetOwner(path, owner, group);
+ fsExecute(user, doAs, command);
+ AUDIT_LOG.info("[{}] to (O/G)[{}]", path, owner + ":" + group);
response = Response.ok().build();
break;
}
case SETPERMISSION: {
- FSOperations.FSSetPermission command = new FSOperations.FSSetPermission(path.value(), permission.value());
- fsExecute(user, doAs.value(), command);
- AUDIT_LOG.info("[{}] to [{}]", path, permission.value());
+ String permission = params.get(PermissionParam.NAME,
+ PermissionParam.class);
+ FSOperations.FSSetPermission command =
+ new FSOperations.FSSetPermission(path, permission);
+ fsExecute(user, doAs, command);
+ AUDIT_LOG.info("[{}] to [{}]", path, permission);
response = Response.ok().build();
break;
}
case SETREPLICATION: {
- FSOperations.FSSetReplication command = new FSOperations.FSSetReplication(path.value(), replication.value());
- JSONObject json = fsExecute(user, doAs.value(), command);
- AUDIT_LOG.info("[{}] to [{}]", path, replication.value());
+ short replication = params.get(ReplicationParam.NAME,
+ ReplicationParam.class);
+ FSOperations.FSSetReplication command =
+ new FSOperations.FSSetReplication(path, replication);
+ JSONObject json = fsExecute(user, doAs, command);
+ AUDIT_LOG.info("[{}] to [{}]", path, replication);
response = Response.ok(json).build();
break;
}
case SETTIMES: {
- FSOperations.FSSetTimes
- command = new FSOperations.FSSetTimes(path.value(), modifiedTime.value(), accessTime.value());
- fsExecute(user, doAs.value(), command);
- AUDIT_LOG.info("[{}] to (M/A)[{}]", path, modifiedTime.value() + ":" + accessTime.value());
+ long modifiedTime = params.get(ModifiedTimeParam.NAME,
+ ModifiedTimeParam.class);
+ long accessTime = params.get(AccessTimeParam.NAME,
+ AccessTimeParam.class);
+ FSOperations.FSSetTimes command =
+ new FSOperations.FSSetTimes(path, modifiedTime, accessTime);
+ fsExecute(user, doAs, command);
+ AUDIT_LOG.info("[{}] to (M/A)[{}]", path,
+ modifiedTime + ":" + accessTime);
response = Response.ok().build();
break;
}
- case RENEWDELEGATIONTOKEN: {
- response = Response.status(Response.Status.BAD_REQUEST).build();
- break;
- }
- case CANCELDELEGATIONTOKEN: {
- response = Response.status(Response.Status.BAD_REQUEST).build();
- break;
- }
- }
- return response;
- }
-
- /**
- * Binding to handle all OPST requests, supported operations are
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues}.
- *
- * @param is request input stream, used only for
- * @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND} operations.
- * @param user principal making the request.
- * @param uriInfo the request uriInfo.
- * @param path path for the POST request.
- * @param op POST operation, default is @link org.apache.hadoop.fs.http.client.HttpFSFileSystem.PostOpValues#APPEND}.
- * @param hasData indicates if the append request is uploading data or not (just getting the handle).
- * @param doAs user being impersonated, defualt value is none. It can be used
- * only if the current user is a HttpFSServer proxyuser.
- *
- * @return the request response.
- *
- * @throws IOException thrown if an IO error occurred. Thrown exceptions are
- * handled by {@link HttpFSExceptionProvider}.
- * @throws FileSystemAccessException thrown if a FileSystemAccess releated error occurred. Thrown
- * exceptions are handled by {@link HttpFSExceptionProvider}.
- */
- @POST
- @Path("{path:.*}")
- @Consumes({"*/*"})
- @Produces({MediaType.APPLICATION_JSON})
- public Response post(InputStream is,
- @Context Principal user,
- @Context UriInfo uriInfo,
- @PathParam("path") FsPathParam path,
- @QueryParam(PostOpParam.NAME) PostOpParam op,
- @QueryParam(DataParam.NAME) @DefaultValue(DataParam.DEFAULT) DataParam hasData,
- @QueryParam(DoAsParam.NAME) @DefaultValue(DoAsParam.DEFAULT) DoAsParam doAs)
- throws IOException, FileSystemAccessException {
- Response response = null;
- if (op == null) {
- throw new UnsupportedOperationException(MessageFormat.format("Missing [{0}] parameter", PostOpParam.NAME));
- }
- path.makeAbsolute();
- MDC.put(HttpFSFileSystem.OP_PARAM, op.value().name());
- switch (op.value()) {
- case APPEND: {
- if (!hasData.value()) {
- response = Response.temporaryRedirect(
- createUploadRedirectionURL(uriInfo, HttpFSFileSystem.PostOpValues.APPEND)).build();
- } else {
- FSOperations.FSAppend command = new FSOperations.FSAppend(is, path.value());
- fsExecute(user, doAs.value(), command);
- AUDIT_LOG.info("[{}]", path);
- response = Response.ok().type(MediaType.APPLICATION_JSON).build();
- }
- break;
+ default: {
+ throw new IOException(
+ MessageFormat.format("Invalid HTTP PUT operation [{0}]",
+ op.value()));
}
}
return response;
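
The refactored PUT binding above dispatches on the "op" query parameter and reads every
remaining value from the injected Parameters object. A minimal client-side sketch of what
such a request could look like; the host, port, base path and query parameter names are
assumptions for illustration, not taken from this patch.

import java.net.HttpURLConnection;
import java.net.URL;

public class MkdirsRequestSketch {
  public static void main(String[] args) throws Exception {
    // "op" selects the case in HttpFSServer#put; the permission value is later
    // read from the injected Parameters object through PermissionParam.
    URL url = new URL("http://localhost:14000/webhdfs/v1/user/foo/dir"
        + "?op=MKDIRS&permission=755");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    System.out.println(conn.getResponseCode()); // 200 plus a JSON body on success
  }
}
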
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java
index 7bc3a14757..e4e6355063 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/BooleanParam.java
@@ -22,15 +22,14 @@
public abstract class BooleanParam extends Param<Boolean> {
- public BooleanParam(String name, String str) {
- value = parseParam(name, str);
+ public BooleanParam(String name, Boolean defaultValue) {
+ super(name, defaultValue);
}
protected Boolean parse(String str) throws Exception {
if (str.equalsIgnoreCase("true")) {
return true;
- }
- if (str.equalsIgnoreCase("false")) {
+ } else if (str.equalsIgnoreCase("false")) {
return false;
}
throw new IllegalArgumentException(MessageFormat.format("Invalid value [{0}], must be a boolean", str));
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java
index aa9408f32e..96b46c4313 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ByteParam.java
@@ -20,8 +20,8 @@
public abstract class ByteParam extends Param<Byte> {
- public ByteParam(String name, String str) {
- value = parseParam(name, str);
+ public ByteParam(String name, Byte defaultValue) {
+ super(name, defaultValue);
}
protected Byte parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java
index ff86406e4a..f605bd2220 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/EnumParam.java
@@ -25,9 +25,9 @@
public abstract class EnumParam<E extends Enum<E>> extends Param<E> {
Class<E> klass;
- public EnumParam(String label, String str, Class<E> e) {
+ public EnumParam(String name, Class<E> e, E defaultValue) {
+ super(name, defaultValue);
klass = e;
- value = parseParam(label, str);
}
protected E parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java
index 6eddaa2e5f..7c0f0813c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/IntegerParam.java
@@ -20,8 +20,8 @@
public abstract class IntegerParam extends Param<Integer> {
- public IntegerParam(String name, String str) {
- value = parseParam(name, str);
+ public IntegerParam(String name, Integer defaultValue) {
+ super(name, defaultValue);
}
protected Integer parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java
index 354a550d7b..ec601bb2ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/LongParam.java
@@ -20,8 +20,8 @@
public abstract class LongParam extends Param<Long> {
- public LongParam(String name, String str) {
- value = parseParam(name, str);
+ public LongParam(String name, Long defaultValue) {
+ super(name, defaultValue);
}
protected Long parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java
index 68a41d5151..62af4818f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Param.java
@@ -23,32 +23,39 @@
import java.text.MessageFormat;
public abstract class Param<T> {
+ private String name;
protected T value;
- public T parseParam(String name, String str) {
- Check.notNull(name, "name");
+ public Param(String name, T defaultValue) {
+ this.name = name;
+ this.value = defaultValue;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public T parseParam(String str) {
try {
- return (str != null && str.trim().length() > 0) ? parse(str) : null;
+ value = (str != null && str.trim().length() > 0) ? parse(str) : value;
} catch (Exception ex) {
throw new IllegalArgumentException(
MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
name, str, getDomain()));
}
+ return value;
}
public T value() {
return value;
}
- protected void setValue(T value) {
- this.value = value;
- }
-
protected abstract String getDomain();
protected abstract T parse(String str) throws Exception;
public String toString() {
- return value.toString();
+ return (value != null) ? value.toString() : "NULL";
}
+
}
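
With the reworked Param, parseParam() keeps the constructor-supplied default when the
query string value is missing or blank, and only replaces it with a successfully parsed
value. A minimal sketch, assuming the Param and LongParam classes from this patch; the
same behaviour is asserted by TestParam further down.

import org.apache.hadoop.lib.wsrs.LongParam;
import org.apache.hadoop.lib.wsrs.Param;

public class ParamDefaultSketch {
  public static void main(String[] args) {
    Param<Long> len = new LongParam("len", 1024L) { }; // 1024L is the default
    System.out.println(len.parseParam(null));   // 1024 -> missing value keeps the default
    System.out.println(len.parseParam(""));     // 1024 -> blank value keeps the default
    System.out.println(len.parseParam("2048")); // 2048 -> parsed value replaces it
  }
}
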
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestEnumParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
similarity index 52%
rename from hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestEnumParam.java
rename to hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
index bb37f75f37..b5ec214d7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestEnumParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/Parameters.java
@@ -15,38 +15,37 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.lib.wsrs;
+import java.util.Map;
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestEnumParam {
-
- public static enum ENUM {
- FOO, BAR
- }
-
- @Test
- public void param() throws Exception {
- EnumParam<ENUM> param = new EnumParam<ENUM>("p", "FOO", ENUM.class) {
- };
- Assert.assertEquals(param.getDomain(), "FOO,BAR");
- Assert.assertEquals(param.value(), ENUM.FOO);
- Assert.assertEquals(param.toString(), "FOO");
- param = new EnumParam<ENUM>("p", null, ENUM.class) {
- };
- Assert.assertEquals(param.value(), null);
- param = new EnumParam<ENUM>("p", "", ENUM.class) {
- };
- Assert.assertEquals(param.value(), null);
+/**
+ * Class that contains all parsed JAX-RS parameters.
+ *
+ * Instances are created by the {@link ParametersProvider} class.
+ */
+public class Parameters {
+ private Map<String, Param<?>> params;
+
+ /**
+ * Constructor that receives the request parsed parameters.
+ *
+ * @param params the request parsed parameters.
+ */
+ public Parameters(Map<String, Param<?>> params) {
+ this.params = params;
}
- @Test(expected = IllegalArgumentException.class)
- public void invalid1() throws Exception {
- new EnumParam<ENUM>("p", "x", ENUM.class) {
- };
+ /**
+ * Returns the value of a request parsed parameter.
+ *
+ * @param name parameter name.
+ * @param klass class of the parameter, used for value casting.
+ * @return the value of the parameter.
+ */
+ @SuppressWarnings("unchecked")
+ public <V, T extends Param<V>> V get(String name, Class<T> klass) {
+ return ((T)params.get(name)).value();
}
-
+
}
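
Parameters itself is just a typed lookup over the map that ParametersProvider builds per
request. A small sketch of constructing one by hand, for example in a test, assuming the
wsrs classes from this patch; the "overwrite" parameter name is illustrative only.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.lib.wsrs.BooleanParam;
import org.apache.hadoop.lib.wsrs.Param;
import org.apache.hadoop.lib.wsrs.Parameters;

public class ParametersSketch {
  public static void main(String[] args) {
    Map<String, Param<?>> map = new HashMap<String, Param<?>>();
    map.put("overwrite", new BooleanParam("overwrite", Boolean.TRUE) { });
    Parameters params = new Parameters(map);
    // get() casts the stored Param to the requested class and returns its value.
    boolean overwrite = params.get("overwrite", BooleanParam.class);
    System.out.println(overwrite); // true, the default, since nothing was parsed
  }
}
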
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java
new file mode 100644
index 0000000000..3d41d991ad
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ParametersProvider.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.wsrs;
+
+import com.sun.jersey.api.core.HttpContext;
+import com.sun.jersey.core.spi.component.ComponentContext;
+import com.sun.jersey.core.spi.component.ComponentScope;
+import com.sun.jersey.server.impl.inject.AbstractHttpContextInjectable;
+import com.sun.jersey.spi.inject.Injectable;
+import com.sun.jersey.spi.inject.InjectableProvider;
+
+import javax.ws.rs.core.Context;
+import javax.ws.rs.core.MultivaluedMap;
+import java.lang.reflect.Type;
+import java.text.MessageFormat;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Jersey provider that parses the request parameters based on the
+ * given parameter definition.
+ */
+public class ParametersProvider
+ extends AbstractHttpContextInjectable<Parameters>
+ implements InjectableProvider<Context, Type> {
+
+ private String driverParam;
+ private Class<? extends Enum> enumClass;
+ private Map<Enum, Class<Param<?>>[]> paramsDef;
+
+ public ParametersProvider(String driverParam, Class<? extends Enum> enumClass,
+ Map<Enum, Class<Param<?>>[]> paramsDef) {
+ this.driverParam = driverParam;
+ this.enumClass = enumClass;
+ this.paramsDef = paramsDef;
+ }
+
+ @Override
+ @SuppressWarnings("unchecked")
+ public Parameters getValue(HttpContext httpContext) {
+ Map<String, Param<?>> map = new HashMap<String, Param<?>>();
+ MultivaluedMap<String, String> queryString =
+ httpContext.getRequest().getQueryParameters();
+ String str = queryString.getFirst(driverParam);
+ if (str == null) {
+ throw new IllegalArgumentException(
+ MessageFormat.format("Missing Operation parameter [{0}]",
+ driverParam));
+ }
+ Enum op;
+ try {
+ op = Enum.valueOf(enumClass, str.toUpperCase());
+ } catch (IllegalArgumentException ex) {
+ throw new IllegalArgumentException(
+ MessageFormat.format("Invalid Operation [{0}]", str));
+ }
+ if (!paramsDef.containsKey(op)) {
+ throw new IllegalArgumentException(
+ MessageFormat.format("Unsupported Operation [{0}]", op));
+ }
+ for (Class<Param<?>> paramClass : paramsDef.get(op)) {
+ Param<?> param;
+ try {
+ param = paramClass.newInstance();
+ } catch (Exception ex) {
+ throw new UnsupportedOperationException(
+ MessageFormat.format(
+ "Param class [{0}] does not have default constructor",
+ paramClass.getName()));
+ }
+ try {
+ param.parseParam(queryString.getFirst(param.getName()));
+ }
+ catch (Exception ex) {
+ throw new IllegalArgumentException(ex.toString(), ex);
+ }
+ map.put(param.getName(), param);
+ }
+ return new Parameters(map);
+ }
+
+ @Override
+ public ComponentScope getScope() {
+ return ComponentScope.PerRequest;
+ }
+
+ @Override
+ public Injectable getInjectable(ComponentContext componentContext, Context context, Type type) {
+ return (type.equals(Parameters.class)) ? this : null;
+ }
+}
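
ParametersProvider is driven entirely by the per-operation definition map handed to its
constructor. A sketch of how a concrete subclass might declare that map, loosely modelled
on the HttpFSParametersProvider referenced elsewhere in this patch; the Operation enum,
DemoDoAsParam class and the "op"/"doas" names are simplified stand-ins, not verbatim code.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.lib.wsrs.Param;
import org.apache.hadoop.lib.wsrs.ParametersProvider;
import org.apache.hadoop.lib.wsrs.StringParam;

public class DemoParametersProvider extends ParametersProvider {

  public enum Operation { MKDIRS }

  public static class DemoDoAsParam extends StringParam {
    public static final String NAME = "doas";
    public DemoDoAsParam() {
      super(NAME, null);
    }
  }

  private static final Map<Enum, Class<Param<?>>[]> PARAMS_DEF =
      new HashMap<Enum, Class<Param<?>>[]>();

  static {
    // In this sketch MKDIRS accepts a single extra parameter.
    PARAMS_DEF.put(Operation.MKDIRS, new Class[]{DemoDoAsParam.class});
  }

  public DemoParametersProvider() {
    // The driver parameter "op" selects the Operation value for the request.
    super("op", Operation.class, PARAMS_DEF);
  }
}
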
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java
index a3995baa61..cc75a86062 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/ShortParam.java
@@ -20,8 +20,8 @@
public abstract class ShortParam extends Param<Short> {
- public ShortParam(String name, String str) {
- value = parseParam(name, str);
+ public ShortParam(String name, Short defaultValue) {
+ super(name, defaultValue);
}
protected Short parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java
index 4b3a9274fe..79e633697f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/StringParam.java
@@ -15,42 +15,38 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-
package org.apache.hadoop.lib.wsrs;
-import org.apache.hadoop.lib.util.Check;
-
import java.text.MessageFormat;
import java.util.regex.Pattern;
public abstract class StringParam extends Param<String> {
private Pattern pattern;
- public StringParam(String name, String str) {
- this(name, str, null);
+ public StringParam(String name, String defaultValue) {
+ this(name, defaultValue, null);
}
- public StringParam(String name, String str, Pattern pattern) {
+ public StringParam(String name, String defaultValue, Pattern pattern) {
+ super(name, defaultValue);
this.pattern = pattern;
- value = parseParam(name, str);
+ parseParam(defaultValue);
}
- public String parseParam(String name, String str) {
- String ret = null;
- Check.notNull(name, "name");
+ public String parseParam(String str) {
try {
if (str != null) {
str = str.trim();
if (str.length() > 0) {
- return parse(str);
+ value = parse(str);
}
}
} catch (Exception ex) {
throw new IllegalArgumentException(
MessageFormat.format("Parameter [{0}], invalid value [{1}], value must be [{2}]",
- name, str, getDomain()));
+ getName(), str, getDomain()));
}
- return ret;
+ return value;
}
protected String parse(String str) throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
index a55d5e2a46..e2f8b842f3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/TestHttpFSFileSystem.java
@@ -475,6 +475,7 @@ public static Collection operations() {
ops[i] = new Object[]{Operation.values()[i]};
}
return Arrays.asList(ops);
+// return Arrays.asList(new Object[][]{ new Object[]{Operation.CREATE}});
}
private Operation operation;
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
index 2596be9754..9996e0bea0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/server/TestCheckUploadContentTypeFilter.java
@@ -31,34 +31,34 @@ public class TestCheckUploadContentTypeFilter {
@Test
public void putUpload() throws Exception {
- test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "application/octet-stream", true, false);
+ test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "application/octet-stream", true, false);
}
@Test
public void postUpload() throws Exception {
- test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "APPLICATION/OCTET-STREAM", true, false);
+ test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "APPLICATION/OCTET-STREAM", true, false);
}
@Test
public void putUploadWrong() throws Exception {
- test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "plain/text", false, false);
- test("PUT", HttpFSFileSystem.PutOpValues.CREATE.toString(), "plain/text", true, true);
+ test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", false, false);
+ test("PUT", HttpFSFileSystem.Operation.CREATE.toString(), "plain/text", true, true);
}
@Test
public void postUploadWrong() throws Exception {
- test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "plain/text", false, false);
- test("POST", HttpFSFileSystem.PostOpValues.APPEND.toString(), "plain/text", true, true);
+ test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", false, false);
+ test("POST", HttpFSFileSystem.Operation.APPEND.toString(), "plain/text", true, true);
}
@Test
public void getOther() throws Exception {
- test("GET", HttpFSFileSystem.GetOpValues.GETHOMEDIRECTORY.toString(), "plain/text", false, false);
+ test("GET", HttpFSFileSystem.Operation.GETHOMEDIRECTORY.toString(), "plain/text", false, false);
}
@Test
public void putOther() throws Exception {
- test("PUT", HttpFSFileSystem.PutOpValues.MKDIRS.toString(), "plain/text", false, false);
+ test("PUT", HttpFSFileSystem.Operation.MKDIRS.toString(), "plain/text", false, false);
}
private void test(String method, String operation, String contentType,
@@ -68,7 +68,7 @@ private void test(String method, String operation, String contentType,
Mockito.reset(request);
Mockito.when(request.getMethod()).thenReturn(method);
Mockito.when(request.getParameter(HttpFSFileSystem.OP_PARAM)).thenReturn(operation);
- Mockito.when(request.getParameter(HttpFSParams.DataParam.NAME)).
+ Mockito.when(request.getParameter(HttpFSParametersProvider.DataParam.NAME)).
thenReturn(Boolean.toString(upload));
Mockito.when(request.getContentType()).thenReturn(contentType);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java
deleted file mode 100644
index b1b140d7cd..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestBooleanParam.java
+++ /dev/null
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestBooleanParam {
-
- @Test
- public void param() throws Exception {
- BooleanParam param = new BooleanParam("p", "true") {
- };
- Assert.assertEquals(param.getDomain(), "a boolean");
- Assert.assertEquals(param.value(), Boolean.TRUE);
- Assert.assertEquals(param.toString(), "true");
- param = new BooleanParam("p", "false") {
- };
- Assert.assertEquals(param.value(), Boolean.FALSE);
- param = new BooleanParam("p", null) {
- };
- Assert.assertEquals(param.value(), null);
- param = new BooleanParam("p", "") {
- };
- Assert.assertEquals(param.value(), null);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void invalid() throws Exception {
- new BooleanParam("p", "x") {
- };
- }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java
deleted file mode 100644
index 6b1a5ef64c..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestByteParam.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestByteParam {
-
- @Test
- public void param() throws Exception {
- ByteParam param = new ByteParam("p", "1") {
- };
- Assert.assertEquals(param.getDomain(), "a byte");
- Assert.assertEquals(param.value(), new Byte((byte) 1));
- Assert.assertEquals(param.toString(), "1");
- param = new ByteParam("p", null) {
- };
- Assert.assertEquals(param.value(), null);
- param = new ByteParam("p", "") {
- };
- Assert.assertEquals(param.value(), null);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void invalid1() throws Exception {
- new ByteParam("p", "x") {
- };
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void invalid2() throws Exception {
- new ByteParam("p", "256") {
- };
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java
deleted file mode 100644
index 634dbe7c2a..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestIntegerParam.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestIntegerParam {
-
- @Test
- public void param() throws Exception {
- IntegerParam param = new IntegerParam("p", "1") {
- };
- Assert.assertEquals(param.getDomain(), "an integer");
- Assert.assertEquals(param.value(), new Integer(1));
- Assert.assertEquals(param.toString(), "1");
- param = new IntegerParam("p", null) {
- };
- Assert.assertEquals(param.value(), null);
- param = new IntegerParam("p", "") {
- };
- Assert.assertEquals(param.value(), null);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void invalid1() throws Exception {
- new IntegerParam("p", "x") {
- };
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void invalid2() throws Exception {
- new IntegerParam("p", "" + Long.MAX_VALUE) {
- };
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java
deleted file mode 100644
index 1a7ddd8d35..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestLongParam.java
+++ /dev/null
@@ -1,47 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestLongParam {
-
- @Test
- public void param() throws Exception {
- LongParam param = new LongParam("p", "1") {
- };
- Assert.assertEquals(param.getDomain(), "a long");
- Assert.assertEquals(param.value(), new Long(1));
- Assert.assertEquals(param.toString(), "1");
- param = new LongParam("p", null) {
- };
- Assert.assertEquals(param.value(), null);
- param = new LongParam("p", "") {
- };
- Assert.assertEquals(param.value(), null);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void invalid1() throws Exception {
- new LongParam("p", "x") {
- };
- }
-
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
new file mode 100644
index 0000000000..ed79c86e7d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestParam.java
@@ -0,0 +1,120 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.lib.wsrs;
+
+import junit.framework.Assert;
+import org.junit.Test;
+
+import java.util.regex.Pattern;
+
+public class TestParam {
+
+ private <T> void test(Param<T> param, String name,
+ String domain, T defaultValue, T validValue,
+ String invalidStrValue, String outOfRangeValue) throws Exception {
+
+ Assert.assertEquals(name, param.getName());
+ Assert.assertEquals(domain, param.getDomain());
+ Assert.assertEquals(defaultValue, param.value());
+ Assert.assertEquals(defaultValue, param.parseParam(""));
+ Assert.assertEquals(defaultValue, param.parseParam(null));
+ Assert.assertEquals(validValue, param.parseParam(validValue.toString()));
+ if (invalidStrValue != null) {
+ try {
+ param.parseParam(invalidStrValue);
+ Assert.fail();
+ } catch (IllegalArgumentException ex) {
+ //NOP
+ } catch (Exception ex) {
+ Assert.fail();
+ }
+ }
+ if (outOfRangeValue != null) {
+ try {
+ param.parseParam(outOfRangeValue);
+ Assert.fail();
+ } catch (IllegalArgumentException ex) {
+ //NOP
+ } catch (Exception ex) {
+ Assert.fail();
+ }
+ }
+ }
+
+ @Test
+ public void testBoolean() throws Exception {
+ Param<Boolean> param = new BooleanParam("b", false) {
+ };
+ test(param, "b", "a boolean", false, true, "x", null);
+ }
+
+ @Test
+ public void testByte() throws Exception {
+ Param<Byte> param = new ByteParam("B", (byte) 1) {
+ };
+ test(param, "B", "a byte", (byte) 1, (byte) 2, "x", "256");
+ }
+
+ @Test
+ public void testShort() throws Exception {
+ Param<Short> param = new ShortParam("S", (short) 1) {
+ };
+ test(param, "S", "a short", (short) 1, (short) 2, "x",
+ "" + ((int)Short.MAX_VALUE + 1));
+ }
+
+ @Test
+ public void testInteger() throws Exception {
+ Param<Integer> param = new IntegerParam("I", 1) {
+ };
+ test(param, "I", "an integer", 1, 2, "x", "" + ((long)Integer.MAX_VALUE + 1));
+ }
+
+ @Test
+ public void testLong() throws Exception {
+ Param<Long> param = new LongParam("L", 1L) {
+ };
+ test(param, "L", "a long", 1L, 2L, "x", null);
+ }
+
+ public static enum ENUM {
+ FOO, BAR
+ }
+
+ @Test
+ public void testEnum() throws Exception {
+ EnumParam<ENUM> param = new EnumParam<ENUM>("e", ENUM.class, ENUM.FOO) {
+ };
+ test(param, "e", "FOO,BAR", ENUM.FOO, ENUM.BAR, "x", null);
+ }
+
+ @Test
+ public void testString() throws Exception {
+ Param param = new StringParam("s", "foo") {
+ };
+ test(param, "s", "a string", "foo", "bar", null, null);
+ }
+
+ @Test
+ public void testRegEx() throws Exception {
+ Param param = new StringParam("r", "aa", Pattern.compile("..")) {
+ };
+ test(param, "r", "..", "aa", "bb", "c", null);
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java
deleted file mode 100644
index b37bddffe4..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestShortParam.java
+++ /dev/null
@@ -1,53 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-public class TestShortParam {
-
- @Test
- public void param() throws Exception {
- ShortParam param = new ShortParam("p", "1") {
- };
- Assert.assertEquals(param.getDomain(), "a short");
- Assert.assertEquals(param.value(), new Short((short) 1));
- Assert.assertEquals(param.toString(), "1");
- param = new ShortParam("p", null) {
- };
- Assert.assertEquals(param.value(), null);
- param = new ShortParam("p", "") {
- };
- Assert.assertEquals(param.value(), null);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void invalid1() throws Exception {
- new ShortParam("p", "x") {
- };
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void invalid2() throws Exception {
- new ShortParam("p", "" + Integer.MAX_VALUE) {
- };
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java
deleted file mode 100644
index feb489e043..0000000000
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/wsrs/TestStringParam.java
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.lib.wsrs;
-
-
-import junit.framework.Assert;
-import org.junit.Test;
-
-import java.util.regex.Pattern;
-
-public class TestStringParam {
-
- @Test
- public void param() throws Exception {
- StringParam param = new StringParam("p", "s") {
- };
- Assert.assertEquals(param.getDomain(), "a string");
- Assert.assertEquals(param.value(), "s");
- Assert.assertEquals(param.toString(), "s");
- param = new StringParam("p", null) {
- };
- Assert.assertEquals(param.value(), null);
- param = new StringParam("p", "") {
- };
- Assert.assertEquals(param.value(), null);
-
- param.setValue("S");
- Assert.assertEquals(param.value(), "S");
- }
-
- @Test
- public void paramRegEx() throws Exception {
- StringParam param = new StringParam("p", "Aaa", Pattern.compile("A.*")) {
- };
- Assert.assertEquals(param.getDomain(), "A.*");
- Assert.assertEquals(param.value(), "Aaa");
- Assert.assertEquals(param.toString(), "Aaa");
- param = new StringParam("p", null) {
- };
- Assert.assertEquals(param.value(), null);
- }
-
- @Test(expected = IllegalArgumentException.class)
- public void paramInvalidRegEx() throws Exception {
- new StringParam("p", "Baa", Pattern.compile("A.*")) {
- };
- }
-}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/httpfs-log4j.properties b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/httpfs-log4j.properties
new file mode 100644
index 0000000000..75175124c5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/resources/httpfs-log4j.properties
@@ -0,0 +1,22 @@
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#log4j.appender.test=org.apache.log4j.varia.NullAppender
+#log4j.appender.test=org.apache.log4j.ConsoleAppender
+log4j.appender.test=org.apache.log4j.FileAppender
+log4j.appender.test.File=${test.dir}/test.log
+log4j.appender.test.Append=true
+log4j.appender.test.layout=org.apache.log4j.PatternLayout
+log4j.appender.test.layout.ConversionPattern=%d{ISO8601} %5p %20c{1}: %4L - %m%n
+log4j.rootLogger=ALL, test
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs-raid/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-raid/pom.xml
new file mode 100644
index 0000000000..a7fa6eb656
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-raid/pom.xml
@@ -0,0 +1,170 @@
+
+
+
+ 4.0.0
+
+ org.apache.hadoop
+ hadoop-project-dist
+ 3.0.0-SNAPSHOT
+ ../../hadoop-project-dist
+
+ org.apache.hadoop
+ hadoop-hdfs-raid
+ 3.0.0-SNAPSHOT
+ jar
+
+ Apache Hadoop HDFS Raid
+ Apache Hadoop HDFS Raid
+
+
+
+ raid
+ false
+
+
+
+
+ junit
+ junit
+ test
+
+
+ org.apache.hadoop
+ hadoop-annotations
+ provided
+
+
+ org.apache.hadoop
+ hadoop-minicluster
+ test
+
+
+ org.apache.hadoop
+ hadoop-client
+ provided
+
+
+ org.apache.hadoop
+ hadoop-archives
+ provided
+
+
+
+
+
+
+
+ maven-dependency-plugin
+
+
+ create-mrapp-generated-classpath
+ generate-test-resources
+
+ build-classpath
+
+
+
+ ${project.build.directory}/test-classes/mrapp-generated-classpath
+
+
+
+
+
+ org.apache.rat
+ apache-rat-plugin
+
+
+
+
+
+
+ org.codehaus.mojo
+ findbugs-maven-plugin
+
+
+
+
+
+
+
+
+
+ docs
+
+ false
+
+
+
+
+ org.apache.maven.plugins
+ maven-site-plugin
+
+
+