HDFS-9168. Move client side unit test to hadoop-hdfs-client. Contributed by Haohui Mai.
Haohui Mai committed Oct 28, 2015
1 parent 73bc65e commit 65f53f2
Showing 25 changed files with 121 additions and 106 deletions.
21 changes: 21 additions & 0 deletions hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -51,6 +51,27 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
        </exclusion>
      </exclusions>
    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-all</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.mock-server</groupId>
+      <artifactId>mockserver-netty</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
  </dependencies>

  <build>

@@ -30,16 +30,49 @@
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class ReplaceDatanodeOnFailure {
+  /**
+   * DEFAULT condition:
+   * Let r be the replication number.
+   * Let n be the number of existing datanodes.
+   * Add a new datanode only if r >= 3 and either
+   * (1) floor(r/2) >= n or (2) the block is hflushed/appended.
+   */
+  private static final Condition CONDITION_DEFAULT = new Condition() {
+    @Override
+    public boolean satisfy(final short replication,
+        final DatanodeInfo[] existings, final int n, final boolean isAppend,
+        final boolean isHflushed) {
+      return replication >= 3 &&
+          (n <= (replication / 2) || isAppend || isHflushed);
+    }
+  };
+  /** Return false unconditionally. */
+  private static final Condition CONDITION_FALSE = new Condition() {
+    @Override
+    public boolean satisfy(short replication, DatanodeInfo[] existings,
+        int nExistings, boolean isAppend, boolean isHflushed) {
+      return false;
+    }
+  };
+  /** Return true unconditionally. */
+  private static final Condition CONDITION_TRUE = new Condition() {
+    @Override
+    public boolean satisfy(short replication, DatanodeInfo[] existings,
+        int nExistings, boolean isAppend, boolean isHflushed) {
+      return true;
+    }
+  };
+
  /** The replacement policies */
  public enum Policy {
    /** The feature is disabled in the entire site. */
-    DISABLE(Condition.FALSE),
+    DISABLE(CONDITION_FALSE),
    /** Never add a new datanode. */
-    NEVER(Condition.FALSE),
+    NEVER(CONDITION_FALSE),
-    /** @see ReplaceDatanodeOnFailure.Condition#DEFAULT */
-    DEFAULT(Condition.DEFAULT),
+    /** @see ReplaceDatanodeOnFailure#CONDITION_DEFAULT */
+    DEFAULT(CONDITION_DEFAULT),
    /** Always add a new datanode when an existing datanode is removed. */
-    ALWAYS(Condition.TRUE);
+    ALWAYS(CONDITION_TRUE);

    private final Condition condition;

@@ -54,41 +54,6 @@ Condition getCondition() {

  /** Datanode replacement condition */
  private interface Condition {
-    /** Return true unconditionally. */
-    Condition TRUE = new Condition() {
-      @Override
-      public boolean satisfy(short replication, DatanodeInfo[] existings,
-          int nExistings, boolean isAppend, boolean isHflushed) {
-        return true;
-      }
-    };
-
-    /** Return false unconditionally. */
-    Condition FALSE = new Condition() {
-      @Override
-      public boolean satisfy(short replication, DatanodeInfo[] existings,
-          int nExistings, boolean isAppend, boolean isHflushed) {
-        return false;
-      }
-    };
-
-    /**
-     * DEFAULT condition:
-     * Let r be the replication number.
-     * Let n be the number of existing datanodes.
-     * Add a new datanode only if r >= 3 and either
-     * (1) floor(r/2) >= n; or
-     * (2) r > n and the block is hflushed/appended.
-     */
-    Condition DEFAULT = new Condition() {
-      @Override
-      public boolean satisfy(final short replication,
-          final DatanodeInfo[] existings, final int n, final boolean isAppend,
-          final boolean isHflushed) {
-        return replication >= 3 &&
-            (n <= (replication / 2) || isAppend || isHflushed);
-      }
-    };

    /** Is the condition satisfied? */
    boolean satisfy(short replication, DatanodeInfo[] existings, int nExistings,

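For reference, the relocated DEFAULT condition only asks for a replacement datanode when the block is replicated at least three ways and the pipeline has shrunk to floor(r/2) nodes or fewer, or the block has been hflushed/appended. The sketch below is illustrative only and is not part of this patch; the method name and sample values are made up. It restates the same rule and evaluates it for a few pipelines:

public class ReplaceDatanodePolicyExample {
  // Mirrors CONDITION_DEFAULT: r >= 3 and (n <= floor(r/2), or the block was appended/hflushed).
  static boolean shouldAddDatanode(short replication, int nExisting,
      boolean isAppend, boolean isHflushed) {
    return replication >= 3
        && (nExisting <= replication / 2 || isAppend || isHflushed);
  }

  public static void main(String[] args) {
    System.out.println(shouldAddDatanode((short) 3, 1, false, false)); // true: 1 <= floor(3/2)
    System.out.println(shouldAddDatanode((short) 3, 2, false, false)); // false: 2 > floor(3/2)
    System.out.println(shouldAddDatanode((short) 3, 2, false, true));  // true: block was hflushed
    System.out.println(shouldAddDatanode((short) 2, 1, false, false)); // false: replication < 3
  }
}
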
@@ -17,18 +17,16 @@
 */
package org.apache.hadoop.hdfs;

-import static org.junit.Assert.assertEquals;
-
-import java.net.InetSocketAddress;
-import java.net.URI;
-
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-
import org.junit.Test;
+
+import java.net.InetSocketAddress;
+import java.net.URI;
+
+import static org.junit.Assert.assertEquals;

/** Test NameNode port defaulting code. */
public class TestDefaultNameNodePort {

@@ -17,29 +17,28 @@
 */
package org.apache.hadoop.hdfs;

-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertSame;
-import static org.junit.Assert.assertTrue;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.OutputStream;
-import java.nio.channels.ReadableByteChannel;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import com.google.common.collect.HashMultiset;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.net.unix.DomainSocket;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

-import com.google.common.collect.HashMultiset;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.nio.channels.ReadableByteChannel;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertSame;
+import static org.junit.Assert.assertTrue;

public class TestPeerCache {
-  static final Log LOG = LogFactory.getLog(TestPeerCache.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestPeerCache.class);

  private static class FakePeer implements Peer {
    private boolean closed = false;

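The same logging cleanup recurs in most of the moved test files: the commons-logging Log/LogFactory pair is swapped for an slf4j Logger. A minimal sketch of the pattern (illustrative only; the class name here is made up, not from the patch):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LoggingMigrationSketch {
  // Before (removed by this commit):
  //   static final Log LOG = LogFactory.getLog(LoggingMigrationSketch.class);
  // After: slf4j's LoggerFactory keys the logger off the class name.
  static final Logger LOG = LoggerFactory.getLogger(LoggingMigrationSketch.class);

  void demo() {
    LOG.info("slf4j supports {} placeholders without string concatenation", "parameterized");
  }
}
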
@@ -17,14 +17,9 @@
 */
package org.apache.hadoop.hdfs.client.impl;

-import static org.junit.Assert.assertSame;
-
-import java.io.IOException;
-import java.util.concurrent.atomic.AtomicInteger;
-
+import com.google.common.base.Supplier;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSOutputStream;
-import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
@@ -35,7 +30,10 @@
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

-import com.google.common.base.Supplier;
+import java.io.IOException;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.junit.Assert.assertSame;

public class TestLeaseRenewer {
  private final String FAKE_AUTHORITY="hdfs://nn1/";

@@ -17,26 +17,26 @@
 */
package org.apache.hadoop.hdfs.shortcircuit;

-import java.io.File;
-import java.io.FileInputStream;
-import java.util.ArrayList;
-import java.util.Iterator;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.ExtendedBlockId;
-import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
import org.apache.hadoop.io.nativeio.SharedFileDescriptorFactory;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.util.ArrayList;
+import java.util.Iterator;

public class TestShortCircuitShm {
-  public static final Log LOG = LogFactory.getLog(TestShortCircuitShm.class);
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestShortCircuitShm.class);

  private static final File TEST_BASE =
      new File(System.getProperty("test.build.data", "/tmp"));

@@ -17,6 +17,19 @@
 */
package org.apache.hadoop.hdfs.util;

+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.Counter;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.CounterMap;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.FixedLengthManager;
+import org.apache.hadoop.hdfs.util.ByteArrayManager.ManagerMap;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.util.Time;
+import org.apache.log4j.Level;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
@@ -31,29 +44,16 @@
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.Counter;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.CounterMap;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.FixedLengthManager;
-import org.apache.hadoop.hdfs.util.ByteArrayManager.ManagerMap;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.util.Time;
-import org.apache.log4j.Level;
-import org.junit.Assert;
-import org.junit.Test;
-
/**
 * Test {@link ByteArrayManager}.
 */
public class TestByteArrayManager {
  static {
-    GenericTestUtils.setLogLevel(LogFactory.getLog(ByteArrayManager.class),
-        Level.ALL);
+    GenericTestUtils.setLogLevel(
+        LoggerFactory.getLogger(ByteArrayManager.class), Level.ALL);
  }

-  static final Log LOG = LogFactory.getLog(TestByteArrayManager.class);
+  static final Logger LOG = LoggerFactory.getLogger(TestByteArrayManager.class);

  private static final Comparator<Future<Integer>> CMP = new Comparator<Future<Integer>>() {
    @Override
@@ -559,9 +559,8 @@ public synchronized int release(byte[] array) {
  }

  public static void main(String[] args) throws Exception {
-    GenericTestUtils.setLogLevel(LogFactory.getLog(ByteArrayManager.class),
-        Level.OFF);
-
+    GenericTestUtils.setLogLevel(LoggerFactory.getLogger(ByteArrayManager.class),
+        Level.OFF);
    final int arrayLength = 64 * 1024; //64k
    final int nThreads = 512;
    final int nAllocations = 1 << 15;

@@ -18,8 +18,6 @@
 */
package org.apache.hadoop.hdfs.web;

-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -38,6 +36,8 @@
import org.mockserver.model.Header;
import org.mockserver.model.HttpRequest;
import org.mockserver.model.HttpResponse;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.URI;
@@ -58,7 +58,8 @@
import static org.mockserver.model.HttpResponse.response;

public class TestWebHDFSOAuth2 {
-  public static final Log LOG = LogFactory.getLog(TestWebHDFSOAuth2.class);
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestWebHDFSOAuth2.class);

  private ClientAndServer mockWebHDFS;
  private ClientAndServer mockOAuthServer;

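TestWebHDFSOAuth2 is the reason mockserver-netty must now be resolvable from hadoop-hdfs-client (see the pom.xml changes elsewhere in this commit). A rough sketch of how such a test stands up a stubbed HTTP endpoint with MockServer follows; it is illustrative only, the port, path, and response body are invented, and the exact API may differ across mockserver-netty versions:

import org.mockserver.integration.ClientAndServer;

import static org.mockserver.integration.ClientAndServer.startClientAndServer;
import static org.mockserver.model.HttpRequest.request;
import static org.mockserver.model.HttpResponse.response;

public class MockWebHdfsSketch {
  public static void main(String[] args) {
    // Start an in-process HTTP server on an arbitrary local port.
    ClientAndServer mockWebHdfs = startClientAndServer(8775);
    try {
      // Stub a WebHDFS-style GET so the client under test receives a canned JSON reply.
      mockWebHdfs.when(request()
              .withMethod("GET")
              .withPath("/webhdfs/v1/test/file"))
          .respond(response()
              .withStatusCode(200)
              .withBody("{\"boolean\": true}"));
      // ... point the client under test at http://localhost:8775 and assert on the result ...
    } finally {
      mockWebHdfs.stop();
    }
  }
}
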
2 changes: 2 additions & 0 deletions hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1641,6 +1641,8 @@ Release 2.8.0 - UNRELEASED
    HDFS-9297. Update TestBlockMissingException to use corruptBlockOnDataNodesByDeletingBlockFile().
    (Tony Wu via lei)

+    HDFS-9168. Move client side unit test to hadoop-hdfs-client. (wheat9)
+
  BUG FIXES

    HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

6 changes: 0 additions & 6 deletions hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -211,12 +211,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
      <artifactId>leveldbjni-all</artifactId>
      <version>1.8</version>
    </dependency>
-    <dependency>
-      <groupId>org.mock-server</groupId>
-      <artifactId>mockserver-netty</artifactId>
-      <version>3.9.2</version>
-      <scope>test</scope>
-    </dependency>
    <!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
    <dependency>
      <groupId>org.bouncycastle</groupId>

@@ -19,7 +19,7 @@
package org.apache.hadoop.hdfs.server.namenode;

import com.google.common.collect.Lists;
-import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
@@ -259,7 +259,7 @@ public void testAuditLoggerWithCallContext() throws IOException {
    auditlog.clearOutput();

    // long context is truncated
-    final String longContext = RandomStringUtils.randomAscii(200);
+    final String longContext = StringUtils.repeat("foo", 100);
    context = new CallerContext.Builder(longContext)
        .setSignature("L".getBytes(CallerContext.SIGNATURE_ENCODING))
        .build();

5 changes: 5 additions & 0 deletions hadoop-project/pom.xml
@@ -779,6 +779,11 @@
        <artifactId>mockito-all</artifactId>
        <version>1.8.5</version>
      </dependency>
+      <dependency>
+        <groupId>org.mock-server</groupId>
+        <artifactId>mockserver-netty</artifactId>
+        <version>3.9.2</version>
+      </dependency>
      <dependency>
        <groupId>org.apache.avro</groupId>
        <artifactId>avro</artifactId>
