diff --git a/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml new file mode 100644 index 00000000000..57d28c4ce1c --- /dev/null +++ b/hadoop-hdds/client/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,31 @@ + + + + + + + + + + + + + + + + diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index e3b0824fa8f..8ca40ab392d 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -63,4 +63,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + + + diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyChunkInputStream.java index 15f7edab6fc..5f3210d76bc 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyChunkInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyChunkInputStream.java @@ -44,7 +44,7 @@ public DummyChunkInputStream(ChunkInfo chunkInfo, byte[] data, Pipeline pipeline) { super(chunkInfo, blockId, xceiverClientFactory, () -> pipeline, verifyChecksum, null); - this.chunkData = data; + this.chunkData = data.clone(); } @Override diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java index 940caa73b0f..e202875c064 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java @@ -68,8 +68,8 @@ public class TestBlockInputStream { private static final int CHUNK_SIZE = 100; - private static Checksum checksum; + private Checksum checksum; private BlockInputStream blockStream; private byte[] blockData; private int blockSize; @@ -231,11 +231,15 @@ public void testRefreshPipelineFunction() throws Exception { MockPipeline.createSingleNodePipeline(), null, false, null, chunks, chunkDataMap, isRefreshed); - Assert.assertFalse(isRefreshed.get()); - seekAndVerify(50); - byte[] b = new byte[200]; - blockInputStreamWithRetry.read(b, 0, 200); - Assert.assertTrue(isRefreshed.get()); + try { + Assert.assertFalse(isRefreshed.get()); + seekAndVerify(50); + byte[] b = new byte[200]; + blockInputStreamWithRetry.read(b, 0, 200); + Assert.assertTrue(isRefreshed.get()); + } finally { + blockInputStreamWithRetry.close(); + } } @Test @@ -263,15 +267,19 @@ protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) { return stream; } }; - subject.initialize(); + try { + subject.initialize(); - // WHEN - byte[] b = new byte[len]; - int bytesRead = subject.read(b, 0, len); + // WHEN + byte[] b = new byte[len]; + int bytesRead = subject.read(b, 0, len); - // THEN - Assert.assertEquals(len, bytesRead); - verify(refreshPipeline).apply(blockID); + // THEN + Assert.assertEquals(len, bytesRead); + verify(refreshPipeline).apply(blockID); + } finally { + subject.close(); + } } @Test @@ -297,15 +305,20 @@ protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) { return stream; } }; - subject.initialize(); - // WHEN - byte[] b = new byte[len]; - LambdaTestUtils.intercept(StorageContainerException.class, - () -> subject.read(b, 0, len)); + try { + subject.initialize(); + + // WHEN + byte[] b = new 
byte[len]; + LambdaTestUtils.intercept(StorageContainerException.class, + () -> subject.read(b, 0, len)); - // THEN - verify(refreshPipeline).apply(blockID); + // THEN + verify(refreshPipeline).apply(blockID); + } finally { + subject.close(); + } } @Test @@ -328,15 +341,20 @@ protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) { return stream; } }; - subject.initialize(); - // WHEN - byte[] b = new byte[len]; - LambdaTestUtils.intercept(OzoneChecksumException.class, - () -> subject.read(b, 0, len)); + try { + subject.initialize(); + + // WHEN + byte[] b = new byte[len]; + LambdaTestUtils.intercept(OzoneChecksumException.class, + () -> subject.read(b, 0, len)); - // THEN - verify(refreshPipeline, never()).apply(blockID); + // THEN + verify(refreshPipeline, never()).apply(blockID); + } finally { + subject.close(); + } } private Pipeline samePipelineWithNewId(Pipeline pipeline) { @@ -380,17 +398,22 @@ protected ChunkInputStream createChunkInputStream(ChunkInfo chunkInfo) { return stream; } }; - subject.initialize(); - subject.unbuffer(); - - // WHEN - byte[] b = new byte[len]; - int bytesRead = subject.read(b, 0, len); - - // THEN - Assert.assertEquals(len, bytesRead); - verify(refreshPipeline).apply(blockID); - verify(clientFactory).acquireClientForReadData(pipeline); - verify(clientFactory).releaseClient(client, false); + + try { + subject.initialize(); + subject.unbuffer(); + + // WHEN + byte[] b = new byte[len]; + int bytesRead = subject.read(b, 0, len); + + // THEN + Assert.assertEquals(len, bytesRead); + verify(refreshPipeline).apply(blockID); + verify(clientFactory).acquireClientForReadData(pipeline); + verify(clientFactory).releaseClient(client, false); + } finally { + subject.close(); + } } } diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java index 71d04a00e64..59145b6c5b7 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java @@ -44,7 +44,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; -import org.jetbrains.annotations.NotNull; import org.junit.Assert; import org.junit.Test; import org.mockito.Mockito; @@ -88,7 +87,6 @@ public void test() throws IOException { } } - @NotNull private BlockOutputStream createBlockOutputStream(BufferPool bufferPool) throws IOException { @@ -119,7 +117,7 @@ private BlockOutputStream createBlockOutputStream(BufferPool bufferPool) /** * XCeiverClient which simulates responses. 
*/ - private class MockXceiverClientSpi extends XceiverClientSpi { + private static class MockXceiverClientSpi extends XceiverClientSpi { private final Pipeline pipeline; diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java index cb110b182d8..a9296bc9a1a 100644 --- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java +++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java @@ -49,8 +49,8 @@ public class TestChunkInputStream { private static final int BYTES_PER_CHECKSUM = 20; private static final String CHUNK_NAME = "dummyChunk"; private static final Random RANDOM = new Random(); - private static Checksum checksum; + private Checksum checksum; private DummyChunkInputStream chunkStream; private ChunkInfo chunkInfo; private byte[] chunkData; @@ -221,13 +221,17 @@ protected ByteString readChunk(ChunkInfo readChunkInfo) { } }; - // WHEN - subject.unbuffer(); - pipelineRef.set(newPipeline); - int b = subject.read(); - - // THEN - Assert.assertNotEquals(-1, b); - verify(clientFactory).acquireClientForReadData(newPipeline); + try { + // WHEN + subject.unbuffer(); + pipelineRef.set(newPipeline); + int b = subject.read(); + + // THEN + Assert.assertNotEquals(-1, b); + verify(clientFactory).acquireClientForReadData(newPipeline); + } finally { + subject.close(); + } } } diff --git a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml index 78da0706c18..eee80656fb7 100644 --- a/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdds/common/dev-support/findbugsExcludeFile.xml @@ -27,4 +27,14 @@ + + + + + + + + + + diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java index 9263b5c6a33..8a177042a64 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java @@ -19,8 +19,10 @@ import java.io.BufferedWriter; import java.io.File; -import java.io.FileWriter; +import java.io.FileOutputStream; import java.io.IOException; +import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; import java.util.concurrent.TimeUnit; import org.apache.hadoop.fs.Path; @@ -61,7 +63,9 @@ private void endConfig(BufferedWriter out) throws IOException { public void testGetAllPropertiesByTags() throws Exception { File coreDefault = tempConfigs.newFile("core-default-test.xml"); File coreSite = tempConfigs.newFile("core-site-test.xml"); - try (BufferedWriter out = new BufferedWriter(new FileWriter(coreDefault))) { + FileOutputStream coreDefaultStream = new FileOutputStream(coreDefault); + try (BufferedWriter out = new BufferedWriter(new OutputStreamWriter( + coreDefaultStream, StandardCharsets.UTF_8))) { startConfig(out); appendProperty(out, "hadoop.tags.system", "YARN,HDFS,NAMENODE"); appendProperty(out, "hadoop.tags.custom", "MYCUSTOMTAG"); @@ -78,7 +82,9 @@ public void testGetAllPropertiesByTags() throws Exception { .getProperty("dfs.random.key")); } - try (BufferedWriter out = new BufferedWriter(new FileWriter(coreSite))) { + FileOutputStream coreSiteStream = new FileOutputStream(coreSite); + try (BufferedWriter out = new 
BufferedWriter(new OutputStreamWriter( + coreSiteStream, StandardCharsets.UTF_8))) { startConfig(out); appendProperty(out, "dfs.random.key", "ABC"); appendProperty(out, "dfs.replication", "3"); diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java index c8dfd2c8159..37df09e8de4 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java @@ -65,7 +65,7 @@ public class TestNetworkTopologyImpl { public TestNetworkTopologyImpl(NodeSchema[] schemas, Node[] nodeArray) { NodeSchemaManager.getInstance().init(schemas, true); cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance()); - dataNodes = nodeArray; + dataNodes = nodeArray.clone(); for (int i = 0; i < dataNodes.length; i++) { cluster.add(dataNodes[i]); } @@ -634,8 +634,7 @@ public void testChooseRandomWithAffinityNode() { key.getNetworkFullPath() + ", ancestor node:" + affinityAncestor.getNetworkFullPath() + ", excludedScope: " + pathList.toString() + ", " + - "excludedList:" + (excludedList == null ? "" : - excludedList.toString())); + "excludedList:" + excludedList.toString()); } } } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java index 10724ab7c28..0bd78f36b82 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/tracing/TestStringCodec.java @@ -45,7 +45,7 @@ void testExtract() throws Exception { () -> codec.extract(sb)); sb.append(":66"); JaegerSpanContext context = codec.extract(sb); - String expectedContextString = new String("123:456:789:66"); + String expectedContextString = "123:456:789:66"; assertTrue(context.getTraceId().equals("123")); assertTrue(context.toString().equals(expectedContextString)); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java index 8a9d9c51a86..41dc4f5b7e0 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/audit/TestOzoneAuditLogger.java @@ -212,7 +212,7 @@ private void verifyNoLog() throws IOException { assertEquals(0, lines.size()); } - private class TestException extends Exception{ + private static class TestException extends Exception{ TestException(String message) { super(message); } diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java index a924321bfd5..2e144e65699 100644 --- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/common/TestChecksum.java @@ -22,6 +22,8 @@ import org.junit.Assert; import org.junit.Test; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Tests for {@link Checksum} class. 
*/ @@ -45,7 +47,7 @@ private Checksum getChecksum(ContainerProtos.ChecksumType type) { public void testVerifyChecksum() throws Exception { Checksum checksum = getChecksum(null); int dataLen = 55; - byte[] data = RandomStringUtils.randomAlphabetic(dataLen).getBytes(); + byte[] data = RandomStringUtils.randomAlphabetic(dataLen).getBytes(UTF_8); ChecksumData checksumData = checksum.computeChecksum(data); @@ -65,7 +67,7 @@ public void testVerifyChecksum() throws Exception { @Test public void testIncorrectChecksum() throws Exception { Checksum checksum = getChecksum(null); - byte[] data = RandomStringUtils.randomAlphabetic(55).getBytes(); + byte[] data = RandomStringUtils.randomAlphabetic(55).getBytes(UTF_8); ChecksumData originalChecksumData = checksum.computeChecksum(data); // Change the data and check if new checksum matches the original checksum. diff --git a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java index b2dc99f361d..8fe3f2d5442 100644 --- a/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java +++ b/hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigFileGenerator.java @@ -19,6 +19,7 @@ import java.io.File; import java.io.FileNotFoundException; +import java.nio.charset.StandardCharsets; import java.util.Scanner; import org.junit.Assert; @@ -36,7 +37,8 @@ public class TestConfigFileGenerator { @Test public void testGeneratedXml() throws FileNotFoundException { String generatedXml = - new Scanner(new File("target/test-classes/ozone-default-generated.xml")) + new Scanner(new File("target/test-classes/ozone-default-generated.xml"), + StandardCharsets.UTF_8.name()) .useDelimiter("//Z") .next(); diff --git a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml index 3359383c47b..7d07e2fcd81 100644 --- a/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml +++ b/hadoop-hdds/container-service/dev-support/findbugsExcludeFile.xml @@ -27,4 +27,62 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java index 0967503b701..8de34ccd768 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java @@ -34,7 +34,6 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import com.google.common.annotations.VisibleForTesting; -import org.jetbrains.annotations.NotNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -113,7 +112,6 @@ public CompletableFuture getContainerDataFromReplicas( //There is a chance for the download is successful but import is failed, //due to data corruption. We need a random selected datanode to have a //chance to succeed next time. 
- @NotNull protected List shuffleDatanodes( List sourceDatanodes ) { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java index f58ecf5f04e..1ec7d967cae 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java @@ -61,13 +61,14 @@ public class TestHddsSecureDatanodeInit { private static PrivateKey privateKey; private static PublicKey publicKey; private static GenericTestUtils.LogCapturer dnLogs; - private static CertificateClient client; private static SecurityConfig securityConfig; private static KeyCodec keyCodec; private static CertificateCodec certCodec; private static X509CertificateHolder certHolder; private static final String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME; + private CertificateClient client; + @BeforeClass public static void setUp() throws Exception { testDir = GenericTestUtils.getRandomizedTestDir(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java index 40cfbbac96c..e430a301811 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java @@ -590,9 +590,8 @@ private static RaftServer.Division getRaftServerDivision( XceiverServerRatis server = (XceiverServerRatis) (dn.getDatanodeStateMachine(). getContainer().getWriteChannel()); - return pipeline == null ? 
server.getServerDivision() : - server.getServerDivision( - RatisHelper.newRaftGroup(pipeline).getGroupId()); + return server.getServerDivision( + RatisHelper.newRaftGroup(pipeline).getGroupId()); } public static StateMachine getStateMachine(HddsDatanodeService dn, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java index f4e21dc4182..8b80f724ea9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java @@ -47,7 +47,7 @@ @RunWith(Parameterized.class) public class TestContainerDataYaml { - private static long testContainerID = 1234; + private long testContainerID = 1234; private static String testRoot = new FileSystemTestHelper().getTestRootDir(); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java index 4cd0e516f71..510b066126a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDeletionChoosingPolicy.java @@ -52,7 +52,7 @@ */ @RunWith(Parameterized.class) public class TestContainerDeletionChoosingPolicy { - private static String path; + private String path; private OzoneContainer ozoneContainer; private ContainerSet containerSet; private OzoneConfiguration conf; diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index 22fa88fd05a..cc9bf46d636 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -98,11 +98,13 @@ public class TestContainerPersistence { LoggerFactory.getLogger(TestContainerPersistence.class); private static String hddsPath; private static OzoneConfiguration conf; - private static ContainerSet containerSet; - private static VolumeSet volumeSet; private static VolumeChoosingPolicy volumeChoosingPolicy; - private static BlockManager blockManager; - private static ChunkManager chunkManager; + + private ContainerSet containerSet; + private VolumeSet volumeSet; + private BlockManager blockManager; + private ChunkManager chunkManager; + @Rule public ExpectedException exception = ExpectedException.none(); /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java index 7b41d99278f..81581f19967 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java +++ 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java @@ -54,7 +54,6 @@ public class TestHandler { private HddsDispatcher dispatcher; private ContainerSet containerSet; private VolumeSet volumeSet; - private Handler handler; @Before public void setup() throws Exception { diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java index c29e5dfc02d..24badabac1a 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java @@ -226,8 +226,6 @@ public void testKeyValueBlockIteratorWithFilter() throws Exception { public void testKeyValueBlockIteratorWithOnlyDeletedBlocks() throws Exception { createContainerWithBlocks(CONTAINER_ID, 0, 5); - String containerPath = new File(containerData.getMetadataPath()) - .getParent(); try(BlockIterator keyValueBlockIterator = db.getStore().getBlockIterator()) { //As all blocks are deleted blocks, blocks does not match with normal key diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index 4000e34771f..d50e240a4c6 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -343,8 +343,6 @@ public void testCloseContainer() throws Exception { keyValueContainerData.getState()); //Check state in the .container file - String containerMetaDataPath = keyValueContainerData - .getMetadataPath(); File containerFile = keyValueContainer.getContainerFile(); keyValueContainerData = (KeyValueContainerData) ContainerDataYaml diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java index 3362d6d479a..716576f1730 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainerCheck.java @@ -56,6 +56,7 @@ import java.util.List; import java.util.UUID; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -208,7 +209,7 @@ private void createContainerWithBlocks(long containerId, int normalBlocks, int bytesPerChecksum = 2 * unitLen; Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256, bytesPerChecksum); - byte[] chunkData = RandomStringUtils.randomAscii(chunkLen).getBytes(); + byte[] chunkData = RandomStringUtils.randomAscii(chunkLen).getBytes(UTF_8); ChecksumData checksumData = checksum.computeChecksum(chunkData); DispatcherContext writeStage = new DispatcherContext.Builder() 
.setStage(DispatcherContext.WriteChunkStage.WRITE_DATA) @@ -219,7 +220,7 @@ private void createContainerWithBlocks(long containerId, int normalBlocks, containerData = new KeyValueContainerData(containerId, chunkManagerTestInfo.getLayout(), - chunksPerBlock * chunkLen * totalBlocks, + (long) chunksPerBlock * chunkLen * totalBlocks, UUID.randomUUID().toString(), UUID.randomUUID().toString()); container = new KeyValueContainer(containerData, conf); container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index dc4c3e56547..a89bd178544 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -76,15 +76,15 @@ public class TestKeyValueHandler { @Rule public TestRule timeout = new Timeout(300000); - private static HddsDispatcher dispatcher; - private static KeyValueHandler handler; - private static final String DATANODE_UUID = UUID.randomUUID().toString(); private static final long DUMMY_CONTAINER_ID = 9999; private final ChunkLayOutVersion layout; + private HddsDispatcher dispatcher; + private KeyValueHandler handler; + public TestKeyValueHandler(ChunkLayOutVersion layout) { this.layout = layout; } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java index 651df89765b..7267314e62e 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandlerWithUnhealthyContainer.java @@ -26,7 +26,6 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.junit.Assert; @@ -133,7 +132,6 @@ public void testGetSmallFile() throws IOException { // -- Helper methods below. 
private KeyValueHandler getDummyHandler() throws IOException { - OzoneConfiguration conf = new OzoneConfiguration(); DatanodeDetails dnDetails = DatanodeDetails.newBuilder() .setUuid(UUID.fromString(DATANODE_UUID)) .setHostName("dummyHost") @@ -142,10 +140,6 @@ private KeyValueHandler getDummyHandler() throws IOException { DatanodeStateMachine stateMachine = mock(DatanodeStateMachine.class); when(stateMachine.getDatanodeDetails()).thenReturn(dnDetails); - StateContext context = new StateContext( - conf, DatanodeStateMachine.DatanodeStates.RUNNING, - stateMachine); - return new KeyValueHandler( new OzoneConfiguration(), stateMachine.getDatanodeDetails().getUuidString(), @@ -205,6 +199,7 @@ private ContainerCommandRequestProto getDummyCommandRequestProto( builder.setGetCommittedBlockLength( ContainerProtos.GetCommittedBlockLengthRequestProto.newBuilder() .setBlockID(fakeBlockId).build()); + break; case ReadChunk: builder.setReadChunk(ContainerProtos.ReadChunkRequestProto.newBuilder() .setBlockID(fakeBlockId).setChunkData(fakeChunkInfo).build()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java index d248ac1059f..bee5381d964 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestTarContainerPacker.java @@ -21,9 +21,8 @@ import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; -import java.io.FileWriter; import java.io.IOException; -import java.nio.charset.StandardCharsets; +import java.io.OutputStreamWriter; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; @@ -55,6 +54,7 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.commons.compress.compressors.CompressorStreamFactory.GZIP; /** @@ -167,10 +167,11 @@ public void pack() throws IOException, CompressorException { } //THEN: check the result + TarArchiveInputStream tarStream = null; try (FileInputStream input = new FileInputStream(targetFile.toFile())) { CompressorInputStream uncompressed = new CompressorStreamFactory() .createCompressorInputStream(GZIP, input); - TarArchiveInputStream tarStream = new TarArchiveInputStream(uncompressed); + tarStream = new TarArchiveInputStream(uncompressed); TarArchiveEntry entry; Map entries = new HashMap<>(); @@ -181,12 +182,16 @@ public void pack() throws IOException, CompressorException { Assert.assertTrue( entries.containsKey("container.yaml")); + } finally { + if (tarStream != null) { + tarStream.close(); + } } //read the container descriptor only try (FileInputStream input = new FileInputStream(targetFile.toFile())) { String containerYaml = new String(packer.unpackContainerDescriptor(input), - StandardCharsets.UTF_8); + UTF_8); Assert.assertEquals(TEST_DESCRIPTOR_FILE_CONTENT, containerYaml); } @@ -202,7 +207,7 @@ public void pack() throws IOException, CompressorException { try (FileInputStream input = new FileInputStream(targetFile.toFile())) { descriptor = new String(packer.unpackContainerData(destinationContainer, input), - StandardCharsets.UTF_8); + UTF_8); } assertExampleMetadataDbIsGood( @@ -304,7 +309,10 @@ private KeyValueContainerData unpackContainerData(File 
containerFile) } private void writeDescriptor(KeyValueContainer container) throws IOException { - try (FileWriter writer = new FileWriter(container.getContainerFile())) { + FileOutputStream fileStream = new FileOutputStream( + container.getContainerFile()); + try (OutputStreamWriter writer = new OutputStreamWriter(fileStream, + UTF_8)) { IOUtils.write(TEST_DESCRIPTOR_FILE_CONTENT, writer); } } @@ -316,7 +324,9 @@ private File writeChunkFile( .resolve(chunkFileName); Files.createDirectories(path.getParent()); File file = path.toFile(); - try (FileWriter writer = new FileWriter(file)) { + FileOutputStream fileStream = new FileOutputStream(file); + try (OutputStreamWriter writer = new OutputStreamWriter(fileStream, + UTF_8)) { IOUtils.write(TEST_CHUNK_FILE_CONTENT, writer); } return file; @@ -329,7 +339,9 @@ private File writeDbFile( .resolve(dbFileName); Files.createDirectories(path.getParent()); File file = path.toFile(); - try (FileWriter writer = new FileWriter(file)) { + FileOutputStream fileStream = new FileOutputStream(file); + try (OutputStreamWriter writer = new OutputStreamWriter(fileStream, + UTF_8)) { IOUtils.write(TEST_DB_FILE_CONTENT, writer); } return file; @@ -357,8 +369,7 @@ private void assertExampleMetadataDbIsGood(Path dbPath, String filename) Files.exists(dbFile)); try (FileInputStream testFile = new FileInputStream(dbFile.toFile())) { - List strings = IOUtils - .readLines(testFile, StandardCharsets.UTF_8); + List strings = IOUtils.readLines(testFile, UTF_8); Assert.assertEquals(1, strings.size()); Assert.assertEquals(TEST_DB_FILE_CONTENT, strings.get(0)); } @@ -375,8 +386,7 @@ private void assertExampleChunkFileIsGood(Path chunkPath, String filename) Files.exists(chunkFile)); try (FileInputStream testFile = new FileInputStream(chunkFile.toFile())) { - List strings = IOUtils - .readLines(testFile, StandardCharsets.UTF_8); + List strings = IOUtils.readLines(testFile, UTF_8); Assert.assertEquals(1, strings.size()); Assert.assertEquals(TEST_CHUNK_FILE_CONTENT, strings.get(0)); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java index 635f3b4742d..5b64a378bca 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/helpers/TestChunkUtils.java @@ -20,7 +20,6 @@ import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; -import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.Arrays; @@ -40,6 +39,8 @@ import org.apache.hadoop.test.GenericTestUtils; import org.apache.commons.io.FileUtils; + +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNABLE_TO_FIND_CHUNK; import org.apache.hadoop.test.LambdaTestUtils; @@ -64,7 +65,7 @@ public class TestChunkUtils { @Test public void concurrentReadOfSameFile() throws Exception { String s = "Hello World"; - byte[] array = s.getBytes(); + byte[] array = s.getBytes(UTF_8); ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array)); Path tempFile = Files.createTempFile(PREFIX, "concurrent"); try { @@ -85,7 +86,7 @@ public void concurrentReadOfSameFile() throws Exception { ByteBuffer readBuffer = ByteBuffer.allocate((int) len); 
ChunkUtils.readData(file, readBuffer, offset, len, stats); LOG.info("Read data ({}): {}", threadNumber, - new String(readBuffer.array())); + new String(readBuffer.array(), UTF_8)); if (!Arrays.equals(array, readBuffer.array())) { failed.set(true); } @@ -151,7 +152,7 @@ public void concurrentProcessing() throws Exception { @Test public void serialRead() throws Exception { String s = "Hello World"; - byte[] array = s.getBytes(); + byte[] array = s.getBytes(UTF_8); ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(array)); Path tempFile = Files.createTempFile(PREFIX, "serial"); try { @@ -175,7 +176,7 @@ public void serialRead() throws Exception { public void validateChunkForOverwrite() throws IOException { Path tempFile = Files.createTempFile(PREFIX, "overwrite"); - FileUtils.write(tempFile.toFile(), "test", StandardCharsets.UTF_8); + FileUtils.write(tempFile.toFile(), "test", UTF_8); Assert.assertTrue( ChunkUtils.validateChunkForOverwrite(tempFile.toFile(), diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java index 982cea35f83..96a83a7ea16 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java @@ -277,7 +277,7 @@ private long addBlocks(KeyValueContainer container, chunkList.clear(); for (int ci = 0; ci < chunksPerBlock; ci++) { String chunkName = strBlock + bi + strChunk + ci; - long offset = ci * datalen; + long offset = ci * (long) datalen; ChunkInfo info = new ChunkInfo(chunkName, offset, datalen); usedBytes += datalen; chunkList.add(info.getProtoBufMessage()); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java index 2c517cb123f..ebdfad63602 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisorScheduling.java @@ -81,11 +81,12 @@ public void test() throws InterruptedException { final Map volumes = volumeLocks.get(sourceDatanode.getUuid()); - synchronized (volumes.get(random.nextInt(volumes.size()))) { + Object volumeLock = volumes.get(random.nextInt(volumes.size())); + synchronized (volumeLock) { System.out.println("Downloading " + task.getContainerId() + " from " + sourceDatanode.getUuid()); try { - Thread.sleep(1000); + volumeLock.wait(1000); } catch (InterruptedException ex) { ex.printStackTrace(); } @@ -93,13 +94,14 @@ public void test() throws InterruptedException { //import, limited by the destination datanode final int volumeIndex = random.nextInt(destinationLocks.size()); - synchronized (destinationLocks.get(volumeIndex)) { + Object destinationLock = destinationLocks.get(volumeIndex); + synchronized (destinationLock) { System.out.println( "Importing " + task.getContainerId() + " to disk " + volumeIndex); try { - Thread.sleep(1000); + destinationLock.wait(1000); } catch (InterruptedException ex) { ex.printStackTrace(); } diff --git a/hadoop-hdds/framework/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdds/framework/dev-support/findbugsExcludeFile.xml new file mode 100644 index 00000000000..6251188ecc1 --- /dev/null +++ b/hadoop-hdds/framework/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 4f986699575..9409c5ca666 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -128,4 +128,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-jar + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + + + diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java index d0ece83a32b..c29dcd9accd 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestCertificateClientInit.java @@ -201,7 +201,7 @@ public void testInitOzoneManager() throws Exception { } InitResponse response = omCertificateClient.init(); - if (pvtKeyPresent && pubKeyPresent & !certPresent) { + if (pvtKeyPresent && pubKeyPresent && !certPresent) { assertTrue(response.equals(RECOVER)); } else { assertTrue(response.equals(expectedResult)); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java index c8201c06b4d..9413611f1d6 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.security.x509.certificate.utils; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -27,9 +28,10 @@ import java.io.BufferedReader; import java.io.ByteArrayInputStream; import java.io.File; -import java.io.FileReader; +import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; +import java.io.InputStreamReader; import java.math.BigInteger; import java.nio.file.Paths; import java.security.KeyPair; @@ -134,7 +136,8 @@ public void testWriteCRL() throws IOException, OperatorCreationException { this.securityConfig.getCrlName()).toFile(); assertTrue(crlFile.exists()); - try (BufferedReader reader = new BufferedReader(new FileReader(crlFile))){ + try (BufferedReader reader = new BufferedReader(new InputStreamReader( + new FileInputStream(crlFile), UTF_8))){ // Verify contents of the file String header = reader.readLine(); @@ -159,7 +162,7 @@ public void testWriteCRLX509() throws IOException, builder.addCRLEntry(x509CertificateHolder.getSerialNumber(), now, CRLReason.cACompromise); - byte[] crlBytes = TMP_CRL_ENTRY.getBytes(); + byte[] crlBytes = TMP_CRL_ENTRY.getBytes(UTF_8); try (InputStream inStream = new ByteArrayInputStream(crlBytes)) { CertificateFactory cf = CertificateFactory.getInstance("X.509"); X509CRL crl = (X509CRL)cf.generateCRL(inStream); diff --git 
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java index 97603d41faf..aa6f5ce2f67 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java @@ -220,7 +220,7 @@ public void cleanup() throws IOException { store.close(); store.destroy(); } else { - System.out.println("--- Store already closed: " + store.getClass()); + System.out.println("--- Store already closed."); } if (testDir != null) { FileUtils.deleteDirectory(testDir); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java index 8b3554a014c..4db70b8b936 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java @@ -160,7 +160,7 @@ public MetricsRecordBuilder addRecord(MetricsInfo metricsInfo) { /** * Test class to buffer a single snapshot of metrics. */ - class BufferedMetricsRecordBuilderImpl extends MetricsRecordBuilder { + static class BufferedMetricsRecordBuilderImpl extends MetricsRecordBuilder { private Map metrics = new HashMap<>(); private String contextName; diff --git a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml new file mode 100644 index 00000000000..55b900bae97 --- /dev/null +++ b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,47 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 179d3d742e2..a5e9935df2a 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -170,6 +170,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java index b24091f7f23..f868522b17f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.scm.PipelineChoosePolicy; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.exceptions.SCMException; -import org.jetbrains.annotations.NotNull; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -66,7 +65,6 @@ public static PipelineChoosePolicy getPolicy( } } - @NotNull private static PipelineChoosePolicy createPipelineChoosePolicyFromClass( Class policyClass) throws SCMException { Constructor constructor; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index e0016f8e6eb..a202647db58 100644 --- 
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -81,8 +81,8 @@ public class TestBlockManager { private SCMPipelineManager pipelineManager; private BlockManagerImpl blockManager; private static final long DEFAULT_BLOCK_SIZE = 128 * MB; - private static HddsProtos.ReplicationFactor factor; - private static HddsProtos.ReplicationType type; + private HddsProtos.ReplicationFactor factor; + private HddsProtos.ReplicationType type; private EventQueue eventQueue; private int numContainerPerOwnerInPipeline; private OzoneConfiguration conf; @@ -436,7 +436,7 @@ public void testBlockDistributionWithMultipleRaftLogDisks() throws Exception { public void testAllocateOversizedBlock() throws Exception { long size = 6 * GB; thrown.expectMessage("Unsupported block size"); - AllocatedBlock block = blockManager.allocateBlock(size, + blockManager.allocateBlock(size, type, factor, OzoneConsts.OZONE, new ExcludeList()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index d490e95bd63..df5126ad4c1 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -19,7 +19,6 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.StringUtils; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; @@ -41,7 +40,6 @@ import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto .DeleteBlockTransactionResult; -import org.apache.hadoop.hdds.utils.MetadataKeyFilters; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.test.GenericTestUtils; @@ -54,7 +52,6 @@ import java.io.File; import java.io.IOException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -78,7 +75,7 @@ */ public class TestDeletedBlockLog { - private static DeletedBlockLogImpl deletedBlockLog; + private DeletedBlockLogImpl deletedBlockLog; private static final int BLOCKS_PER_TXN = 5; private OzoneConfiguration conf; private File testDir; @@ -318,11 +315,7 @@ public void testRandomOperateTransactions() throws Exception { Random random = new Random(); int added = 0, committed = 0; List blocks = new ArrayList<>(); - List txIDs = new ArrayList<>(); - byte[] latestTxid = StringUtils.string2Bytes("#LATEST_TXID#"); - MetadataKeyFilters.MetadataKeyFilter avoidLatestTxid = - (preKey, currentKey, nextKey) -> - !Arrays.equals(latestTxid, currentKey); + List txIDs; // Randomly add/get/commit/increase transactions. 
for (int i = 0; i < 100; i++) { int state = random.nextInt(4); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java index 6a7e9297f66..22538208866 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java @@ -542,7 +542,7 @@ public List processHeartbeat(DatanodeDetails datanodeDetails) { @Override public Boolean isNodeRegistered( DatanodeDetails datanodeDetails) { - return null; + return false; } @Override diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java index 4cabd0183da..7f0d651c671 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/SimpleMockNodeManager.java @@ -317,7 +317,7 @@ public List processHeartbeat(DatanodeDetails datanodeDetails) { @Override public Boolean isNodeRegistered(DatanodeDetails datanodeDetails) { - return null; + return false; } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java index b080ea1a820..064d24ecbf7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java @@ -157,7 +157,7 @@ public void testCloseContainerEventWithValidContainers() throws IOException { @Test public void testCloseContainerEventWithRatis() throws IOException { - GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer + GenericTestUtils.LogCapturer .captureLogs(CloseContainerEventHandler.LOG); ContainerInfo container = containerManager .allocateContainer(HddsProtos.ReplicationType.RATIS, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java index 979e37ff974..cc9e49ff132 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java @@ -39,7 +39,6 @@ import org.mockito.Mockito; import java.io.IOException; -import java.util.HashSet; import java.util.Iterator; import java.util.Set; import java.util.stream.Collectors; @@ -501,9 +500,6 @@ public void openContainerKeyAndBytesUsedUpdatedToMinimumOfAllReplicas() = ContainerReplicaProto.State.OPEN; final ContainerInfo containerOne = getContainer(LifeCycleState.OPEN); - final Set containerIDSet = new HashSet<>(); - containerIDSet.add(containerOne.containerID()); - containerStateManager.loadContainer(containerOne); // Container loaded, no replicas reported from DNs. Expect zeros for // usage values. 
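Review note on the isNodeRegistered changes in MockNodeManager and SimpleMockNodeManager above: a method declared to return the boxed Boolean but able to return null trips SpotBugs' NP_BOOLEAN_RETURN_NULL pattern, because any call site that auto-unboxes the result throws a NullPointerException. A minimal sketch of the failure mode the fix avoids (class and method names here are illustrative, not from the patch):

public class BoxedBooleanDemo {
  // Mirrors the old mock behavior: declared Boolean, returns null.
  static Boolean isNodeRegistered(String datanodeId) {
    return null; // SpotBugs: NP_BOOLEAN_RETURN_NULL
  }

  public static void main(String[] args) {
    // The if-condition auto-unboxes the null Boolean and throws
    // NullPointerException, even though the call site looks like a
    // plain boolean test.
    if (isNodeRegistered("dn-1")) {
      System.out.println("registered");
    }
  }
}

Returning false keeps the mocks NPE-safe for callers that treat the result as a primitive boolean.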
@@ -572,9 +568,6 @@ public void notOpenContainerKeyAndBytesUsedUpdatedToMaximumOfAllReplicas() = ContainerReplicaProto.State.CLOSED; final ContainerInfo containerOne = getContainer(LifeCycleState.CLOSED); - final Set containerIDSet = new HashSet<>(); - containerIDSet.add(containerOne.containerID()); - containerStateManager.loadContainer(containerOne); // Container loaded, no replicas reported from DNs. Expect zeros for // usage values. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java index bc34c9714ff..7983bcdd0f3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java @@ -1073,7 +1073,6 @@ public void testUnderReplicatedNotHealthySource() private ContainerInfo createContainer(LifeCycleState containerState) throws SCMException { final ContainerInfo container = getContainer(containerState); - final ContainerID id = container.containerID(); containerStateManager.loadContainer(container); return container; } @@ -1116,7 +1115,7 @@ public void teardown() throws IOException { replicationManager.stop(); } - private class DatanodeCommandHandler implements + private static class DatanodeCommandHandler implements EventHandler { private AtomicInteger invocation = new AtomicInteger(0); @@ -1163,7 +1162,7 @@ private boolean received(final SCMCommandProto.Type type, } } - class ListOfNElements extends ArgumentMatcher { + static class ListOfNElements extends ArgumentMatcher { private int expected; @@ -1177,7 +1176,7 @@ public boolean matches(Object argument) { } } - class FunctionMatcher extends ArgumentMatcher { + static class FunctionMatcher extends ArgumentMatcher { private Function function; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java index aa506cb763b..3ced1a24c04 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java @@ -55,7 +55,7 @@ public class TestContainerPlacementFactory { // datanodes array list private List datanodes = new ArrayList<>(); // node storage capacity - private final long storageCapacity = 100L; + private static final long STORAGE_CAPACITY = 100L; // configuration private OzoneConfiguration conf; // node manager @@ -93,13 +93,13 @@ public void testRackAwarePolicy() throws IOException { when(nodeManager.getNodes(NodeStatus.inServiceHealthy())) .thenReturn(new ArrayList<>(datanodes)); when(nodeManager.getNodeStat(anyObject())) - .thenReturn(new SCMNodeMetric(storageCapacity, 0L, 100L)); + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 0L, 100L)); when(nodeManager.getNodeStat(datanodes.get(2))) - .thenReturn(new SCMNodeMetric(storageCapacity, 90L, 10L)); + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 90L, 10L)); when(nodeManager.getNodeStat(datanodes.get(3))) - .thenReturn(new SCMNodeMetric(storageCapacity, 80L, 20L)); + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 80L, 20L)); 
when(nodeManager.getNodeStat(datanodes.get(4))) - .thenReturn(new SCMNodeMetric(storageCapacity, 70L, 30L)); + .thenReturn(new SCMNodeMetric(STORAGE_CAPACITY, 70L, 30L)); PlacementPolicy policy = ContainerPlacementPolicyFactory .getPolicy(conf, nodeManager, cluster, true, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java index 33fe35f5064..d77996706d5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java @@ -512,7 +512,7 @@ private void mockGetContainerReplicaCount( * This simple internal class is used to track and handle any DatanodeAdmin * events fired by the DatanodeAdminMonitor during tests. */ - private class DatanodeAdminHandler implements + private static class DatanodeAdminHandler implements EventHandler { private AtomicInteger invocation = new AtomicInteger(0); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java index 25d87445f32..07ef0b7d758 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java @@ -83,7 +83,7 @@ public void testHostStringsParseCorrectly() assertEquals(1234, def.getPort()); try { - def = new NodeDecommissionManager.HostDefinition("foobar:abcd"); + new NodeDecommissionManager.HostDefinition("foobar:abcd"); fail("InvalidHostStringException should have been thrown"); } catch (InvalidHostStringException e) { } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 5eaeedba12b..3c036d7d232 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -1227,12 +1227,10 @@ public void testScmRegisterNodeWith4LayerNetworkTopology() final int nodeCount = hostNames.length; // use default IP address to resolve node try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails[] nodes = new DatanodeDetails[nodeCount]; for (int i = 0; i < nodeCount; i++) { DatanodeDetails node = createDatanodeDetails( UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null); nodeManager.register(node, null, null); - nodes[i] = node; } // verify network topology cluster has all the registered nodes @@ -1272,12 +1270,10 @@ private void testScmRegisterNodeWithNetworkTopology(boolean useHostname) final int nodeCount = hostNames.length; // use default IP address to resolve node try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails[] nodes = new DatanodeDetails[nodeCount]; for (int i = 0; i < nodeCount; i++) { DatanodeDetails node = createDatanodeDetails( UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null); nodeManager.register(node, null, null); - nodes[i] = node; } // verify network topology cluster has all the registered nodes @@ -1375,7 +1371,6 @@ private void testGetNodesByAddress(boolean 
useHostname) } final int nodeCount = hostNames.length; try (SCMNodeManager nodeManager = createNodeManager(conf)) { - DatanodeDetails[] nodes = new DatanodeDetails[nodeCount]; for (int i = 0; i < nodeCount; i++) { DatanodeDetails node = createDatanodeDetails( UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java index 49545b5a662..9e5528783d2 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java @@ -84,10 +84,10 @@ * Test cases to verify PipelineManager. */ public class TestSCMPipelineManager { - private static MockNodeManager nodeManager; - private static File testDir; - private static OzoneConfiguration conf; - private static SCMMetadataStore scmMetadataStore; + private MockNodeManager nodeManager; + private File testDir; + private OzoneConfiguration conf; + private SCMMetadataStore scmMetadataStore; @Before public void setUp() throws Exception { @@ -585,8 +585,7 @@ public void testPipelineNotCreatedUntilSafeModePrecheck() ratisProvider); try { - Pipeline pipeline = pipelineManager - .createPipeline(HddsProtos.ReplicationType.RATIS, + pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); fail("Pipelines should not have been created"); } catch (IOException e) { @@ -595,8 +594,7 @@ public void testPipelineNotCreatedUntilSafeModePrecheck() // Ensure a pipeline of factor ONE can be created - no exceptions should be // raised. - Pipeline pipeline = pipelineManager - .createPipeline(HddsProtos.ReplicationType.RATIS, + pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); // Simulate safemode check exiting. @@ -753,7 +751,9 @@ public void testScmWithPipelineDBKeyFormatChange() throws Exception { oldPipelines.values().forEach(p -> pipelineManager.containsPipeline(p.getId())); } finally { - newScmMetadataStore.stop(); + if (newScmMetadataStore != null) { + newScmMetadataStore.stop(); + } } // Mimicking another restart. 
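The TestSCMPipelineManager hunk above guards the metadata-store shutdown so that a failure earlier in the test cannot resurface as a NullPointerException thrown from the finally block. A minimal sketch of that cleanup pattern, with hypothetical names (openStore is a stand-in for the store creation, not an Ozone API):

```java
import java.io.Closeable;
import java.io.IOException;

public final class GuardedCleanupExample {

  public static void runScenario() throws IOException {
    Closeable store = null; // stays null if openStore() throws
    try {
      store = openStore();
      // ... exercise the resource under test ...
    } finally {
      // Guard the cleanup: closing an unassigned resource would replace
      // the original test failure with a NullPointerException.
      if (store != null) {
        store.close();
      }
    }
  }

  // Hypothetical factory standing in for SCMMetadataStore creation.
  private static Closeable openStore() {
    return () -> { };
  }
}
```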
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/TestLeaderChoosePolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/TestLeaderChoosePolicy.java index 8c4df07eb87..9110c927eb8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/TestLeaderChoosePolicy.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/TestLeaderChoosePolicy.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm.pipeline.leader.choose.algorithms; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager; @@ -36,13 +35,10 @@ public class TestLeaderChoosePolicy { private OzoneConfiguration conf; - private ScmConfig scmConfig; - @Before public void setup() { //initialize network topology instance conf = new OzoneConfiguration(); - scmConfig = conf.getObject(ScmConfig.class); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index 78313070b92..45decbc6cbe 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -63,9 +63,9 @@ */ public class TestSCMSafeModeManager { - private static EventQueue queue; + private EventQueue queue; private SCMSafeModeManager scmSafeModeManager; - private static OzoneConfiguration config; + private OzoneConfiguration config; private List containers = Collections.emptyList(); @Rule diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java index 415196a20fa..8c567e92723 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java @@ -51,7 +51,7 @@ public class TestSCMBlockProtocolServer { private StorageContainerManager scm; private NodeManager nodeManager; private ScmBlockLocationProtocolServerSideTranslatorPB service; - private final int nodeCount = 10; + private static final int NODE_COUNT = 10; @Before public void setUp() throws Exception { @@ -64,7 +64,7 @@ public void setUp() throws Exception { scm.exitSafeMode(); // add nodes to scm node manager nodeManager = scm.getScmNodeManager(); - for (int i = 0; i < nodeCount; i++) { + for (int i = 0; i < NODE_COUNT; i++) { nodeManager.register(randomDatanodeDetails(), null, null); } @@ -95,7 +95,7 @@ public void testSortDatanodes() throws Exception { System.out.println("client = " + client); datanodeDetails.stream().forEach( node -> System.out.println(node.toString())); - Assert.assertTrue(datanodeDetails.size() == nodeCount); + Assert.assertTrue(datanodeDetails.size() == NODE_COUNT); // illegal client 1 client += "X"; @@ -103,14 +103,14 @@ public void testSortDatanodes() throws Exception { System.out.println("client = " + client); 
datanodeDetails.stream().forEach( node -> System.out.println(node.toString())); - Assert.assertTrue(datanodeDetails.size() == nodeCount); + Assert.assertTrue(datanodeDetails.size() == NODE_COUNT); // illegal client 2 client = "/default-rack"; datanodeDetails = server.sortDatanodes(nodes, client); System.out.println("client = " + client); datanodeDetails.stream().forEach( node -> System.out.println(node.toString())); - Assert.assertTrue(datanodeDetails.size() == nodeCount); + Assert.assertTrue(datanodeDetails.size() == NODE_COUNT); // unknown node to sort nodes.add(UUID.randomUUID().toString()); @@ -122,7 +122,7 @@ public void testSortDatanodes() throws Exception { .build(); ScmBlockLocationProtocolProtos.SortDatanodesResponseProto resp = service.sortDatanodes(request, CURRENT_VERSION); - Assert.assertTrue(resp.getNodeList().size() == nodeCount); + Assert.assertTrue(resp.getNodeList().size() == NODE_COUNT); System.out.println("client = " + client); resp.getNodeList().stream().forEach( node -> System.out.println(node.getNetworkName())); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java index 60a56e3ffbc..dc983b86fc3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestStorageContainerManagerStarter.java @@ -24,6 +24,8 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import java.util.regex.Matcher; import java.util.regex.Pattern; import static org.junit.Assert.*; @@ -39,13 +41,14 @@ public class TestStorageContainerManagerStarter { private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); private final PrintStream originalOut = System.out; private final PrintStream originalErr = System.err; + private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name(); private MockSCMStarter mock; @Before - public void setUpStreams() { - System.setOut(new PrintStream(outContent)); - System.setErr(new PrintStream(errContent)); + public void setUpStreams() throws UnsupportedEncodingException { + System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); mock = new MockSCMStarter(); } @@ -120,10 +123,11 @@ public void testGenClusterIdWithInvalidParamDoesNotRun() { } @Test - public void testUsagePrintedOnInvalidInput() { + public void testUsagePrintedOnInvalidInput() + throws UnsupportedEncodingException { executeCommand("--invalid"); Pattern p = Pattern.compile("^Unknown option:.*--invalid.*\nUsage"); - Matcher m = p.matcher(errContent.toString()); + Matcher m = p.matcher(errContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java index 9d428c298aa..048b9532cc3 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java @@ -339,7 +339,7 @@ public List processHeartbeat(DatanodeDetails dd) { @Override public Boolean isNodeRegistered( DatanodeDetails datanodeDetails) { - return null; + return false; } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java index fd652837755..a6bd7448e3b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdds.scm.node.SCMNodeMetrics; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.metrics2.MetricsRecordBuilder; import static org.apache.hadoop.test.MetricsAsserts.assertGauge; import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; @@ -84,14 +83,11 @@ public static void teardown() throws IOException { /** * Verifies heartbeat processing count. * - * @throws InterruptedException */ @Test - public void testHBProcessing() throws InterruptedException { + public void testHBProcessing() { long hbProcessed = getCounter("NumHBProcessed"); - NodeReportProto nodeReport = createNodeReport(); - nodeManager.processHeartbeat(registeredDatanode); assertEquals("NumHBProcessed", hbProcessed + 1, @@ -170,8 +166,6 @@ public void testNodeCountAndInfoMetricsReported() throws Exception { nodeManager.processNodeReport(registeredDatanode, nodeReport); - MetricsRecordBuilder metricsSource = getMetrics(SCMNodeMetrics.SOURCE_NAME); - assertGauge("InServiceHealthyNodes", 1, getMetrics(SCMNodeMetrics.class.getSimpleName())); assertGauge("InServiceStaleNodes", 0, diff --git a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java index 45d4d7b908c..ba8d46849b5 100644 --- a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java +++ b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestListInfoSubcommand.java @@ -25,6 +25,8 @@ import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.List; import java.util.UUID; @@ -47,12 +49,13 @@ public class TestListInfoSubcommand { private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); private final PrintStream originalOut = System.out; private final PrintStream originalErr = System.err; + private static final String DEFAULT_ENCODING = StandardCharsets.UTF_8.name(); @Before - public void setup() { + public void setup() throws UnsupportedEncodingException { cmd = new ListInfoSubcommand(); - System.setOut(new PrintStream(outContent)); - System.setErr(new PrintStream(errContent)); + System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); } @After @@ -79,12 +82,12 @@ public void testDataNodeOperationalStateIncludedInOutput() throws Exception { // Pattern p = Pattern.compile( "^Operational State:\\s+IN_SERVICE$", Pattern.MULTILINE); - Matcher m = 
p.matcher(outContent.toString()); + Matcher m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); // Should also have a node with the state DECOMMISSIONING p = Pattern.compile( "^Operational State:\\s+DECOMMISSIONING$", Pattern.MULTILINE); - m = p.matcher(outContent.toString()); + m = p.matcher(outContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } diff --git a/hadoop-ozone/client/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/client/dev-support/findbugsExcludeFile.xml new file mode 100644 index 00000000000..c15dd9d8faf --- /dev/null +++ b/hadoop-ozone/client/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,24 @@ + + + + + + + + + diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index c512a9005dd..0a138923b51 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -44,4 +44,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + + + diff --git a/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml index 55abc263017..5c54f72ffab 100644 --- a/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml +++ b/hadoop-ozone/common/dev-support/findbugsExcludeFile.xml @@ -16,4 +16,21 @@ limitations under the License. --> + + + + + + + + + + + + + + + + + diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java index 17fc9b58996..27d8febbdb1 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java @@ -26,7 +26,7 @@ import java.util.HashMap; import java.util.List; -import java.util.Set; +import java.util.Map; import static org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.*; import static org.junit.Assert.assertEquals; @@ -110,15 +110,15 @@ public void testAclParse() { testMatrix.put(" anonymous::rw", Boolean.TRUE); testMatrix.put(" world:WORLD:rw", Boolean.TRUE); - Set keys = testMatrix.keySet(); - for (String key : keys) { - if (testMatrix.get(key)) { - OzoneAcl.parseAcl(key); + for (Map.Entry entry : testMatrix.entrySet()) { + if (entry.getValue()) { + OzoneAcl.parseAcl(entry.getKey()); } else { try { - OzoneAcl.parseAcl(key); + OzoneAcl.parseAcl(entry.getKey()); // should never get here since parseAcl will throw - fail("An exception was expected but did not happen. Key: " + key); + fail("An exception was expected but did not happen. 
Key: " + + entry.getKey()); } catch (IllegalArgumentException e) { // nothing to do } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java index 7e213554789..16285c20170 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java @@ -19,9 +19,7 @@ package org.apache.hadoop.ozone.om.lock; import java.util.ArrayList; -import java.util.LinkedList; import java.util.List; -import java.util.Queue; import java.util.Stack; import java.util.UUID; import java.util.concurrent.atomic.AtomicBoolean; @@ -162,7 +160,6 @@ public void testLockViolations() { OzoneManagerLock.Resource.values()) { Stack stack = new Stack<>(); List currentLocks = new ArrayList<>(); - Queue queue = new LinkedList<>(); for (OzoneManagerLock.Resource higherResource : OzoneManagerLock.Resource.values()) { if (higherResource.getMask() > resource.getMask()) { @@ -170,7 +167,6 @@ public void testLockViolations() { lock.acquireWriteLock(higherResource, resourceName); stack.push(new ResourceInfo(resourceName, higherResource)); currentLocks.add(higherResource.getName()); - queue.add(new ResourceInfo(resourceName, higherResource)); // try to acquire lower level lock try { resourceName = generateResourceName(resource); @@ -221,7 +217,7 @@ private String[] generateResourceName(OzoneManagerLock.Resource resource) { /** * Class used to store locked resource info. */ - public class ResourceInfo { + public static class ResourceInfo { private String[] lockName; private OzoneManagerLock.Resource resource; @@ -231,7 +227,7 @@ public class ResourceInfo { } public String[] getLockName() { - return lockName; + return lockName.clone(); } public OzoneManagerLock.Resource getResource() { diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 485f43af3fc..b2d4fc797a9 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -80,6 +80,13 @@ + + com.github.spotbugs + spotbugs-maven-plugin + + true + + diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/dev-support/findbugsExcludeFile.xml new file mode 100644 index 00000000000..b6f6582f6c8 --- /dev/null +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,24 @@ + + + + + + + + + diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml index 5523150b858..f80a93854ce 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml @@ -54,4 +54,17 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> test + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + + + + diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java index 72fbb47bc28..eefad184717 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java +++ 
b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java @@ -68,7 +68,7 @@ private void fail() { f.fail(cluster); } catch (Throwable t) { LOG.info("Caught exception while inducing failure:{}", f.getName(), t); - System.exit(-2); + throw new RuntimeException(); } } diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java index 9eaa1667746..faa73463e80 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java @@ -50,14 +50,14 @@ public static List> getClassList() { * } */ - private final String keyNameDelimiter = "_"; + private static final String KEY_NAME_DELIMITER = "_"; public abstract void initialize() throws Exception; public abstract void generateLoad() throws Exception; String getKeyName(int keyIndex) { - return toString() + keyNameDelimiter + keyIndex; + return toString() + KEY_NAME_DELIMITER + keyIndex; } @Override diff --git a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java index 8a201fab260..cb9f93f1519 100644 --- a/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java +++ b/hadoop-ozone/insight/src/test/java/org/apache/hadoop/ozone/insight/TestConfigurationSubCommand.java @@ -19,6 +19,8 @@ import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.io.UnsupportedEncodingException; +import java.nio.charset.StandardCharsets; import org.apache.hadoop.hdds.conf.Config; import org.apache.hadoop.hdds.conf.ConfigGroup; @@ -41,7 +43,7 @@ public class TestConfigurationSubCommand { @Before public void setup() throws Exception { - System.setOut(new PrintStream(out)); + System.setOut(new PrintStream(out, false, StandardCharsets.UTF_8.name())); } @After @@ -50,14 +52,14 @@ public void reset() { } @Test - public void testPrintConfig() { + public void testPrintConfig() throws UnsupportedEncodingException { OzoneConfiguration conf = new OzoneConfiguration(); conf.set("ozone.scm.client.address", "omclient"); ConfigurationSubCommand subCommand = new ConfigurationSubCommand(); subCommand.printConfig(CustomConfig.class, conf); - final String output = out.toString(); + final String output = out.toString(StandardCharsets.UTF_8.name()); Assert.assertTrue(output.contains(">>> ozone.scm.client.address")); Assert.assertTrue(output.contains("default: localhost")); Assert.assertTrue(output.contains("current: omclient")); diff --git a/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml new file mode 100644 index 00000000000..0bf7ea4e22e --- /dev/null +++ b/hadoop-ozone/integration-test/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,131 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-ozone/integration-test/pom.xml 
b/hadoop-ozone/integration-test/pom.xml index dfe7dd05e0f..aaeada6d507 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -191,4 +191,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + + + + diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java index 809e6d1b90b..a1ac77e7ad8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractUnbufferTest.java @@ -133,6 +133,7 @@ private void unbuffer(FSDataInputStream stream) throws IOException { protected void validateFullFileContents(FSDataInputStream stream) throws IOException { + assertNotNull(stream); validateFileContents(stream, TEST_FILE_LEN, 0); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index 5655f85a283..0d39a2bb69e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -135,7 +135,7 @@ public void testO3FSMultiByteRead() throws IOException { System.arraycopy(tmp, 0, value, i * tmp.length, tmp.length); i++; } - Assert.assertEquals(i * tmp.length, data.length); + Assert.assertEquals((long) i * tmp.length, data.length); Assert.assertArrayEquals(value, data); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java index 19f7c15cfeb..2a9ea8785c1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java @@ -93,8 +93,7 @@ public void init() throws Exception { cluster.waitForClusterToBeReady(); // create a volume and a bucket to be used by OzoneFileSystem - OzoneBucket bucket = - TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName); + TestDataUtil.createVolumeAndBucket(cluster, volumeName, bucketName); rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME, bucketName, volumeName); @@ -431,8 +430,8 @@ private void createKey(OzoneBucket ozoneBucket, String key, int length, ozoneInputStream.read(read, 0, length); ozoneInputStream.close(); - String inputString = new String(input); - Assert.assertEquals(inputString, new String(read)); + String inputString = new String(input, UTF_8); + Assert.assertEquals(inputString, new String(read, UTF_8)); // Read using filesystem. 
FSDataInputStream fsDataInputStream = o3fs.open(new Path(key)); @@ -440,7 +439,7 @@ private void createKey(OzoneBucket ozoneBucket, String key, int length, fsDataInputStream.read(read, 0, length); ozoneInputStream.close(); - Assert.assertEquals(inputString, new String(read)); + Assert.assertEquals(inputString, new String(read, UTF_8)); } private void checkPath(Path path) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java index 2b8803edc41..fbdd5d4a8c6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java @@ -87,7 +87,6 @@ public class TestOzoneFileInterfaces { public Timeout timeout = new Timeout(300000); private String rootPath; - private String userName; /** * Parameter class to set absolute url/defaultFS handling. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java index cd4e4b2fa7f..6b64b49468b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java @@ -53,6 +53,7 @@ import org.apache.commons.io.IOUtils; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.FileSystem.TRASH_PREFIX; import static org.apache.hadoop.fs.ozone.Constants.LISTING_PAGE_SIZE; @@ -589,7 +590,7 @@ public void testListStatusOnSubDirs() throws Exception { @Test public void testSeekOnFileLength() throws IOException { Path file = new Path("/file"); - ContractTestUtils.createFile(fs, file, true, "a".getBytes()); + ContractTestUtils.createFile(fs, file, true, "a".getBytes(UTF_8)); try (FSDataInputStream stream = fs.open(file)) { long fileLength = fs.getFileStatus(file).getLen(); stream.seek(fileLength); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java index cbe84b6ad7a..dea9a580244 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java @@ -39,6 +39,7 @@ import java.io.IOException; import java.util.HashMap; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys .HDDS_SCM_SAFEMODE_PIPELINE_CREATION; @@ -164,7 +165,7 @@ public void testReportProcessingMetrics() throws Exception { new HashMap<>()); String data = "file data"; - ozoneOutputStream.write(data.getBytes(), 0, data.length()); + ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); ozoneOutputStream.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index ecf1c2f05ac..08cc975a9e9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -46,9 +46,9 @@ @Ignore public class TestLeaderChoosePolicy { - private static MiniOzoneCluster cluster; + private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); - private static PipelineManager pipelineManager; + private PipelineManager pipelineManager; public void init(int numDatanodes, int datanodePipelineLimit) throws Exception { @@ -94,8 +94,8 @@ private void checkLeaderBalance(int dnNum, int leaderNumOfEachDn) } Assert.assertTrue(leaderCount.size() == dnNum); - for (UUID key : leaderCount.keySet()) { - Assert.assertTrue(leaderCount.get(key) == leaderNumOfEachDn); + for (Map.Entry<UUID, Integer> entry : leaderCount.entrySet()) { + Assert.assertTrue(entry.getValue() == leaderNumOfEachDn); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java index 42acb12489f..6fe2aec89fb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java @@ -54,12 +54,12 @@ public class TestNode2PipelineMap { @Rule public Timeout timeout = new Timeout(300000); - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static StorageContainerManager scm; - private static ContainerWithPipeline ratisContainer; - private static ContainerManager containerManager; - private static PipelineManager pipelineManager; + private MiniOzoneCluster cluster; + private OzoneConfiguration conf; + private StorageContainerManager scm; + private ContainerWithPipeline ratisContainer; + private ContainerManager containerManager; + private PipelineManager pipelineManager; /** * Create a MiniDFSCluster for testing.
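Several hunks in this patch, including the checkLeaderBalance change above, replace keySet() iteration followed by a per-key get() with a single entrySet() walk, which is the pattern SpotBugs flags as WMI_WRONG_MAP_ITERATOR. A minimal self-contained sketch of the two forms (names are illustrative):

```java
import java.util.HashMap;
import java.util.Map;

public final class EntrySetExample {

  public static void main(String[] args) {
    Map<String, Integer> leaderCount = new HashMap<>();
    leaderCount.put("dn-1", 3);
    leaderCount.put("dn-2", 3);

    // Flagged form: every iteration pays for an extra hash lookup.
    for (String key : leaderCount.keySet()) {
      System.out.println(key + " -> " + leaderCount.get(key));
    }

    // Preferred form: the entry already carries both key and value.
    for (Map.Entry<String, Integer> entry : leaderCount.entrySet()) {
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
  }
}
```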
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java index b16add04048..785e494ea48 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java @@ -47,9 +47,9 @@ @Ignore public class TestRatisPipelineCreateAndDestroy { - private static MiniOzoneCluster cluster; + private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); - private static PipelineManager pipelineManager; + private PipelineManager pipelineManager; public void init(int numDatanodes) throws Exception { conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java index 39b67ac2aeb..47cb135415f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java @@ -49,7 +49,7 @@ @Ignore public class TestSCMSafeModeWithPipelineRules { - private static MiniOzoneCluster cluster; + private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); private PipelineManager pipelineManager; private MiniOzoneCluster.Builder clusterBuilder; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index c1a7f53fcf8..4df5f222290 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -250,7 +250,7 @@ public void stopOzoneManager(String omNodeId) { */ public static class Builder extends MiniOzoneClusterImpl.Builder { - private final String nodeIdBaseStr = "omNode-"; + private static final String NODE_ID_PREFIX = "omNode-"; private List activeOMs = new ArrayList<>(); private List inactiveOMs = new ArrayList<>(); @@ -353,7 +353,7 @@ protected List createOMService() throws IOException, for (int i = 1; i<= numOfOMs; i++) { // Set nodeId - String nodeId = nodeIdBaseStr + i; + String nodeId = NODE_ID_PREFIX + i; OzoneConfiguration config = new OzoneConfiguration(conf); config.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, nodeId); // Set the OM http(s) address to null so that the cluster picks @@ -424,7 +424,7 @@ private void initHAConfig(int basePort) throws IOException { int port = basePort; for (int i = 1; i <= numOfOMs; i++, port+=6) { - String omNodeId = nodeIdBaseStr + i; + String omNodeId = NODE_ID_PREFIX + i; omNodesKeyValue.append(",").append(omNodeId); String omAddrKey = OmUtils.addKeySuffixes( OMConfigKeys.OZONE_OM_ADDRESS_KEY, omServiceId, omNodeId); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java index e1f139c0d34..d6afbec563d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerStateMachineIdempotency.java @@ -43,9 +43,12 @@ import org.junit.Test; import java.io.IOException; + import org.junit.Rule; import org.junit.rules.Timeout; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Tests the idempotent operations in ContainerStateMachine. */ @@ -97,7 +100,8 @@ public void testContainerStateMachineIdempotency() throws Exception { // call create Container again BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); byte[] data = - RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(); + RandomStringUtils.random(RandomUtils.nextInt(0, 1024)) + .getBytes(UTF_8); ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper .getWriteChunkRequest(container.getPipeline(), blockID, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java index 362701c1b34..3a4df24d681 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDataUtil.java @@ -36,6 +36,8 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.hadoop.ozone.om.exceptions.OMException; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Utility to help to generate test data. */ @@ -83,14 +85,14 @@ public static void createKey(OzoneBucket bucket, String keyName, try (OutputStream stream = bucket .createKey(keyName, content.length(), repType, repFactor, new HashMap<>())) { - stream.write(content.getBytes()); + stream.write(content.getBytes(UTF_8)); } } public static String getKey(OzoneBucket bucket, String keyName) throws IOException { try (InputStream stream = bucket.readKey(keyName)) { - return new Scanner(stream).useDelimiter("\\A").next(); + return new Scanner(stream, UTF_8.name()).useDelimiter("\\A").next(); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java index 4f4b53f3d00..4df0f6618b8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.minikdc.MiniKdc; @@ -45,7 +44,6 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; import 
org.apache.hadoop.security.UserGroupInformation; @@ -282,8 +280,6 @@ public void testDelegationToken() throws Exception { om.getScmClient().getBlockClient().close(); om.getScmClient().getContainerClient().close(); - long omVersion = - RPC.getProtocolVersion(OzoneManagerProtocolPB.class); try { // Start OM om.setCertClient(new CertificateClientTestImpl(conf)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java index 7090ec9790f..5ff02ad5330 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java @@ -19,9 +19,11 @@ package org.apache.hadoop.ozone; import java.io.File; +import java.io.FileInputStream; import java.io.FileOutputStream; -import java.io.FileReader; import java.io.IOException; +import java.io.InputStreamReader; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.HashSet; import java.util.List; @@ -154,7 +156,8 @@ public void testDatanodeIDPersistent() throws Exception { // Validate using yaml parser Yaml yaml = new Yaml(); try { - yaml.load(new FileReader(validIdsFile)); + yaml.load(new InputStreamReader(new FileInputStream(validIdsFile), + StandardCharsets.UTF_8)); } catch (Exception e) { Assert.fail("Failed parsing datanode id yaml."); } @@ -283,9 +286,15 @@ private void createMalformedIDFile(File malformedFile) DatanodeDetails id = randomDatanodeDetails(); ContainerUtils.writeDatanodeDetailsTo(id, malformedFile); - FileOutputStream out = new FileOutputStream(malformedFile); - out.write("malformed".getBytes()); - out.close(); + FileOutputStream out = null; + try { + out = new FileOutputStream(malformedFile); + out.write("malformed".getBytes(StandardCharsets.UTF_8)); + } finally { + if (out != null) { + out.close(); + } + } } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneHACluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneHACluster.java index 051eb94d582..23062757730 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneHACluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneHACluster.java @@ -19,8 +19,6 @@ package org.apache.hadoop.ozone; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; @@ -46,7 +44,6 @@ public class TestMiniOzoneHACluster { private MiniOzoneHAClusterImpl cluster = null; - private ObjectStore objectStore; private OzoneConfiguration conf; private String clusterId; private String scmId; @@ -81,8 +78,6 @@ public void init() throws Exception { .setNumOfOzoneManagers(numOfOMs) .build(); cluster.waitForClusterToBeReady(); - objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf) - .getObjectStore(); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 291f19f88e4..a8107e928b9 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -49,7 +49,6 @@ import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; @@ -63,7 +62,6 @@ import org.apache.hadoop.ozone.om.helpers.S3SecretValue; import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; import org.apache.hadoop.ozone.security.OzoneTokenIdentifier; import org.apache.hadoop.security.KerberosAuthException; import org.apache.hadoop.security.SaslRpcServer.AuthMethod; @@ -411,7 +409,6 @@ public void testAccessControlExceptionOnClient() throws Exception { LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger()); GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO); setupOm(conf); - long omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class); try { om.setCertClient(new CertificateClientTestImpl(conf)); om.start(); @@ -475,8 +472,6 @@ public void testDelegationTokenRenewal() throws Exception { int tokenMaxLifetime = 1000; newConf.setLong(DELEGATION_TOKEN_MAX_LIFETIME_KEY, tokenMaxLifetime); setupOm(newConf); - long omVersion = - RPC.getProtocolVersion(OzoneManagerProtocolPB.class); OzoneManager.setTestSecureOmFlag(true); // Start OM @@ -564,8 +559,6 @@ public void testGetS3Secret() throws Exception { // Setup secure OM for start setupOm(conf); - long omVersion = - RPC.getProtocolVersion(OzoneManagerProtocolPB.class); try { // Start OM om.setCertClient(new CertificateClientTestImpl(conf)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java index fe0e075379a..37476fbecbb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java @@ -145,9 +145,8 @@ public List getAllBlocks(Long containeID) throws IOException { public boolean verifyBlocksWithTxnTable(Map> containerBlocks) throws IOException { - Set containerIDs = containerBlocks.keySet(); - for (Long entry : containerIDs) { - ReferenceCountedDB meta = getContainerMetadata(entry); + for (Map.Entry<Long, List<Long>> entry : containerBlocks.entrySet()) { + ReferenceCountedDB meta = getContainerMetadata(entry.getKey()); DatanodeStore ds = meta.getStore(); DatanodeStoreSchemaTwoImpl dnStoreTwoImpl = (DatanodeStoreSchemaTwoImpl) ds; @@ -159,7 +158,7 @@ public boolean verifyBlocksWithTxnTable(Map> containerBlocks) txnsInTxnTable) { conID.addAll(txn.getValue().getLocalIDList()); } - if (!conID.equals(containerBlocks.get(entry))) { + if (!conID.equals(entry.getValue())) { return false; } meta.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java index af17f303267..d406ba4f9da 100644 ---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBCSID.java @@ -41,6 +41,7 @@ import java.util.List; import java.util.concurrent.TimeUnit; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys. HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys. @@ -121,7 +122,7 @@ public void testBCSID() throws Exception { objectStore.getVolume(volumeName).getBucket(bucketName) .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); key.close(); // get the name of a valid container. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index 46d48aeca74..b3c7d8ef1f5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -71,7 +71,7 @@ public class TestBlockOutputStreamWithFailures { @Rule public Timeout timeout = new Timeout(300000); - private static MiniOzoneCluster cluster; + private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); private OzoneClient client; private ObjectStore objectStore; @@ -246,7 +246,7 @@ public void testWatchForCommitWithCloseContainerException() Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); // Written the same data twice String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -332,7 +332,7 @@ public void testWatchForCommitDatanodeFailure() throws Exception { Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); // Written the same data twice String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -501,7 +501,7 @@ public void testFailureWithPrimeSizedData() throws Exception { Assert.assertTrue(keyOutputStream.getLocationInfoList().size() == 0); // Written the same data twice String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -572,7 +572,7 @@ public void testExceptionDuringClose() throws Exception { Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0); // Written the same data twice String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -663,7 +663,7 @@ public void testWatchForCommitWithSingleNodeRatis() throws Exception { Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size()); // Written the same data twice String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -756,7 
+756,7 @@ public void testDatanodeFailureWithSingleNodeRatis() throws Exception { // Written the same data twice String dataString = new String(data1, UTF_8); cluster.restartHddsDatanode(pipeline.getNodes().get(0), true); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -849,7 +849,7 @@ public void testDatanodeFailureWithPreAllocation() throws Exception { // Written the same data twice String dataString = new String(data1, UTF_8); cluster.restartHddsDatanode(pipeline.getNodes().get(0), true); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } private OzoneOutputStream createKey(String keyName, ReplicationType type, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java index 21e43e64d68..21dde59e4e6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailuresFlushDelay.java @@ -72,7 +72,7 @@ public class TestBlockOutputStreamWithFailuresFlushDelay { @Rule public Timeout timeout = new Timeout(300000); - private static MiniOzoneCluster cluster; + private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); private OzoneClient client; private ObjectStore objectStore; @@ -247,7 +247,7 @@ public void testWatchForCommitWithCloseContainerException() Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); // Written the same data twice String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -334,7 +334,7 @@ public void testWatchForCommitDatanodeFailure() throws Exception { Assert.assertEquals(0, keyOutputStream.getStreamEntries().size()); // Written the same data twice String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -503,7 +503,7 @@ public void testFailureWithPrimeSizedData() throws Exception { Assert.assertTrue(keyOutputStream.getLocationInfoList().size() == 0); // Written the same data twice String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -574,7 +574,7 @@ public void testExceptionDuringClose() throws Exception { Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 0); // Written the same data twice String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -665,7 +665,7 @@ public void testWatchForCommitWithSingleNodeRatis() throws Exception { Assert.assertEquals(0, keyOutputStream.getLocationInfoList().size()); // Written the same data twice String dataString = new String(data1, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, 
dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -758,7 +758,7 @@ public void testDatanodeFailureWithSingleNodeRatis() throws Exception { // Written the same data twice String dataString = new String(data1, UTF_8); cluster.restartHddsDatanode(pipeline.getNodes().get(0), true); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } @Test @@ -851,7 +851,7 @@ public void testDatanodeFailureWithPreAllocation() throws Exception { // Written the same data twice String dataString = new String(data1, UTF_8); cluster.restartHddsDatanode(pipeline.getNodes().get(0), true); - validateData(keyName, dataString.concat(dataString).getBytes()); + validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } private OzoneOutputStream createKey(String keyName, ReplicationType type, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java index 116c0cf73d1..47bdb3e1419 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCloseContainerHandlingByClient.java @@ -422,7 +422,7 @@ public void testBlockWrites() throws Exception { // read the key from OM again and match the length.The length will still // be the equal to the original data size. OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(5 * chunkSize, keyInfo.getDataSize()); + Assert.assertEquals((long) 5 * chunkSize, keyInfo.getDataSize()); // Written the same data twice String dataString = new String(data1, UTF_8); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java index a658cf449ce..eecf78881f3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestCommitWatcher.java @@ -77,18 +77,18 @@ public class TestCommitWatcher { */ @Rule public Timeout timeout = new Timeout(300000); - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static OzoneClient client; - private static ObjectStore objectStore; - private static int chunkSize; - private static long flushSize; - private static long maxFlushSize; - private static long blockSize; - private static String volumeName; - private static String bucketName; - private static String keyString; - private static StorageContainerLocationProtocolClientSideTranslatorPB + private MiniOzoneCluster cluster; + private OzoneConfiguration conf = new OzoneConfiguration(); + private OzoneClient client; + private ObjectStore objectStore; + private int chunkSize; + private long flushSize; + private long maxFlushSize; + private long blockSize; + private String volumeName; + private String bucketName; + private String keyString; + private StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; /** @@ -101,7 +101,7 @@ public class TestCommitWatcher { @Before public void init() throws Exception { chunkSize = (int)(1 * 
OzoneConsts.MB); - flushSize = 2 * chunkSize; + flushSize = (long) 2 * chunkSize; maxFlushSize = 2 * flushSize; blockSize = 2 * maxFlushSize; // Make sure the pipeline does not get destroyed quickly @@ -217,16 +217,14 @@ public void testReleaseBuffers() throws Exception { CompletableFuture future2 = futures.get(1); future1.get(); - Assert.assertNotNull(watcher.getFutureMap().get(new Long(chunkSize))); + Assert.assertNotNull(watcher.getFutureMap().get((long) chunkSize)); Assert.assertTrue( - watcher.getFutureMap().get(new Long(chunkSize)).equals(future1)); + watcher.getFutureMap().get((long) chunkSize).equals(future1)); // wait on 2nd putBlock to complete future2.get(); - Assert.assertNotNull(watcher.getFutureMap().get( - new Long(2 * chunkSize))); + Assert.assertNotNull(watcher.getFutureMap().get((long) 2 * chunkSize)); Assert.assertTrue( - watcher.getFutureMap().get(new Long(2 * chunkSize)). - equals(future2)); + watcher.getFutureMap().get((long) 2 * chunkSize).equals(future2)); Assert.assertTrue(watcher. getCommitIndex2flushedDataMap().size() == 2); watcher.watchOnFirstIndex(); @@ -294,14 +292,14 @@ public void testReleaseBuffersOnException() throws Exception { CompletableFuture future2 = futures.get(1); future1.get(); - Assert.assertNotNull(watcher.getFutureMap().get(new Long(chunkSize))); + Assert.assertNotNull(watcher.getFutureMap().get((long) chunkSize)); Assert.assertTrue( - watcher.getFutureMap().get(new Long(chunkSize)).equals(future1)); + watcher.getFutureMap().get((long) chunkSize).equals(future1)); // wait on 2nd putBlock to complete future2.get(); - Assert.assertNotNull(watcher.getFutureMap().get(new Long(2 * chunkSize))); + Assert.assertNotNull(watcher.getFutureMap().get((long) 2 * chunkSize)); Assert.assertTrue( - watcher.getFutureMap().get(new Long(2 * chunkSize)).equals(future2)); + watcher.getFutureMap().get((long) 2 * chunkSize).equals(future2)); Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().size() == 2); watcher.watchOnFirstIndex(); Assert.assertFalse(watcher.getCommitIndex2flushedDataMap() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java index d9f75788ec8..1bbc6355bca 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java @@ -56,6 +56,7 @@ import java.util.concurrent.TimeUnit; import java.util.function.Predicate; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -147,7 +148,7 @@ public void testContainerReplication() throws Exception { objectStore.getVolume(volumeName).getBucket(bucketName) .createKey(keyName, 0, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>()); - byte[] testData = "ratis".getBytes(); + byte[] testData = "ratis".getBytes(UTF_8); // First write and flush creates a container in the datanode key.write(testData); key.flush(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index fbd9becd61d..9057b0ca7db 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.test.GenericTestUtils; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; @@ -141,9 +142,9 @@ public void testContainerStateMachineFailures() throws Exception { .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); key.flush(); - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); //get the name of a valid container KeyOutputStream groupOutputStream = @@ -187,9 +188,9 @@ public void testRatisSnapshotRetention() throws Exception { .createKey(("ratis" + i), 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); // First write and flush creates a container in the datanode - key.write(("ratis" + i).getBytes()); + key.write(("ratis" + i).getBytes(UTF_8)); key.flush(); - key.write(("ratis" + i).getBytes()); + key.write(("ratis" + i).getBytes(UTF_8)); key.close(); } @@ -211,9 +212,9 @@ public void testRatisSnapshotRetention() throws Exception { .createKey(("ratis" + i), 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); // First write and flush creates a container in the datanode - key.write(("ratis" + i).getBytes()); + key.write(("ratis" + i).getBytes(UTF_8)); key.flush(); - key.write(("ratis" + i).getBytes()); + key.write(("ratis" + i).getBytes(UTF_8)); key.close(); } stateMachine = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java index e00d5d07bc4..fe5b11e2764 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailureOnRead.java @@ -46,6 +46,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.test.GenericTestUtils; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; @@ -169,7 +170,7 @@ public void testReadStateMachineFailureClosesPipeline() throws Exception { .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>()); // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); key.flush(); // get the name of a valid container diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java index 34f6964be94..58a236f74b5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java @@ -64,6 +64,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.test.LambdaTestUtils; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; @@ -94,6 +95,7 @@ public class TestContainerStateMachineFailures { private static String volumeName; private static String bucketName; private static XceiverClientManager xceiverClientManager; + private static Random random; /** * Create a MiniDFSCluster for testing. @@ -151,6 +153,7 @@ public static void init() throws Exception { bucketName = volumeName; objectStore.createVolume(volumeName); objectStore.getVolume(volumeName).createBucket(bucketName); + random = new Random(); } /** @@ -169,7 +172,7 @@ public void testContainerStateMachineFailures() throws Exception { objectStore.getVolume(volumeName).getBucket(bucketName) .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); - byte[] testData = "ratis".getBytes(); + byte[] testData = "ratis".getBytes(UTF_8); // First write and flush creates a container in the datanode key.write(testData); key.flush(); @@ -226,9 +229,9 @@ public void testUnhealthyContainer() throws Exception { .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); key.flush(); - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); KeyOutputStream groupOutputStream = (KeyOutputStream) key .getOutputStream(); List locationInfoList = @@ -310,9 +313,9 @@ public void testApplyTransactionFailure() throws Exception { .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); key.flush(); - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); KeyOutputStream groupOutputStream = (KeyOutputStream) key. 
getOutputStream(); List locationInfoList = @@ -400,9 +403,9 @@ public void testApplyTransactionIdempotencyWithClosedContainer() .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); key.flush(); - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); @@ -473,9 +476,9 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() .createKey("ratis-1", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); key.flush(); - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); KeyOutputStream groupOutputStream = (KeyOutputStream) key .getOutputStream(); List locationInfoList = @@ -530,7 +533,7 @@ public void testWriteStateMachineDataIdempotencyWithClosedContainer() try { xceiverClient.sendCommand(ContainerTestHelper .getWriteChunkRequest(pipeline, omKeyLocationInfo.getBlockID(), - 1024, new Random().nextInt(), null)); + 1024, random.nextInt(), null)); latch.countDown(); } catch (IOException e) { latch.countDown(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java index 12086526ed9..f3bc5c104ef 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFlushDelay.java @@ -68,11 +68,11 @@ public class TestContainerStateMachineFlushDelay { private String volumeName; private String bucketName; private String path; - private static int chunkSize; - private static int flushSize; - private static int maxFlushSize; - private static int blockSize; - private static String keyString; + private int chunkSize; + private int flushSize; + private int maxFlushSize; + private int blockSize; + private String keyString; /** * Create a MiniDFSCluster for testing. 
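The hunks above convert static test fixtures (cluster handles, chunk/flush/block sizes) to instance fields and replace a per-call new Random() with one shared field. JUnit 4 creates a fresh instance of the test class for every test method, so instance fields are re-initialized per test, while static mutable state survives across methods and parameterized runs, and SpotBugs additionally complains when instance setup methods write to statics. A minimal sketch of the difference, using a hypothetical counter rather than anything from this patch:

import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

public class InstanceStateTest {
  private static int staticCounter;  // shared by every test method in the JVM
  private int instanceCounter;       // fresh per test: JUnit 4 instantiates the class each time

  @Before
  public void setUp() {
    instanceCounter = 0;             // statics would need explicit resetting like this too
  }

  @Test
  public void incrementsFromZero() {
    staticCounter++;
    instanceCounter++;
    Assert.assertEquals(1, instanceCounter);  // always holds
    // staticCounter's value depends on how many tests ran before this one
  }
}

The same reasoning applies to the shared Random field: one generator per test class avoids constructing a fresh, freshly seeded Random for a single nextInt() call, which is also what the SpotBugs warning about a Random created and used only once points at.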
@@ -149,7 +149,7 @@ public void testContainerStateMachineFailures() throws Exception { // First write and flush creates a container in the datanode key.write(data); key.flush(); - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); //get the name of a valid container KeyOutputStream groupOutputStream = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java index 044ac910016..58ef998dcfe 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java @@ -61,6 +61,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.test.GenericTestUtils; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT; @@ -197,7 +198,7 @@ public void testDeleteKeyWithSlowFollower() throws Exception { objectStore.getVolume(volumeName).getBucket(bucketName) .createKey(keyName, 0, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>()); - byte[] testData = "ratis".getBytes(); + byte[] testData = "ratis".getBytes(UTF_8); // First write and flush creates a container in the datanode key.write(testData); key.flush(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index b44427b18d2..cc9966a77d1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.client.rpc; import java.io.IOException; +import static java.nio.charset.StandardCharsets.UTF_8; import java.time.Duration; import java.util.ArrayList; import java.util.Collections; @@ -167,7 +168,8 @@ public void testBlockWritesWithDnFailures() throws Exception { OzoneOutputStream key = createKey(keyName, ReplicationType.RATIS, 0); byte[] data = ContainerTestHelper - .getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes(); + .getFixedLengthString(keyString, chunkSize + chunkSize / 2).getBytes( + UTF_8); key.write(data); // get the name of a valid container @@ -209,7 +211,7 @@ public void testWriteSmallFile() throws Exception { createKey(keyName, ReplicationType.RATIS, 0); String data = ContainerTestHelper .getFixedLengthString(keyString, chunkSize/2); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); // get the name of a valid container Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); KeyOutputStream keyOutputStream = @@ -242,8 +244,9 @@ public void testWriteSmallFile() throws Exception { Assert.assertNotEquals( keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) .getBlockID(), blockId); - Assert.assertEquals(data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.getBytes()); + 
Assert.assertEquals(data.getBytes(UTF_8).length, + keyInfo.getDataSize()); + validateData(keyName, data.getBytes(UTF_8)); } @@ -266,7 +269,7 @@ public void testContainerExclusionWithClosedContainerException() // Assert that 1 block will be preallocated Assert.assertEquals(1, streamEntryList.size()); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); key.flush(); long containerId = streamEntryList.get(0).getBlockID().getContainerID(); BlockID blockId = streamEntryList.get(0).getBlockID(); @@ -279,7 +282,7 @@ public void testContainerExclusionWithClosedContainerException() // This write will hit ClosedContainerException and this container should // will be added in the excludelist - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); key.flush(); Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds() @@ -302,8 +305,9 @@ public void testContainerExclusionWithClosedContainerException() Assert.assertNotEquals( keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) .getBlockID(), blockId); - Assert.assertEquals(2 * data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.concat(data).getBytes()); + Assert.assertEquals(2 * data.getBytes(UTF_8).length, + keyInfo.getDataSize()); + validateData(keyName, data.concat(data).getBytes(UTF_8)); } @Test @@ -325,7 +329,7 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception { // Assert that 1 block will be preallocated Assert.assertEquals(1, streamEntryList.size()); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); key.flush(); long containerId = streamEntryList.get(0).getBlockID().getContainerID(); BlockID blockId = streamEntryList.get(0).getBlockID(); @@ -341,8 +345,8 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception { // next write ops. 
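Nearly every hunk in this stretch replaces the charset-less String.getBytes() and new String(byte[]) with their UTF_8 overloads. The no-argument forms use the JVM's platform default charset, which varies by OS and locale, so byte-length assertions can diverge between environments; SpotBugs reports the default-charset calls as DM_DEFAULT_ENCODING. The Charset overloads also avoid the checked UnsupportedEncodingException that the string-named variant getBytes("UTF-8") declares. A small sketch of the round-trip these assertions rely on:

import static java.nio.charset.StandardCharsets.UTF_8;

public final class CharsetDemo {
  public static void main(String[] args) {
    String value = "sample value";
    byte[] data = value.getBytes(UTF_8);            // deterministic on every platform
    String roundTripped = new String(data, UTF_8);
    System.out.println(value.equals(roundTripped)); // true
    // The key-size assertions compare byte length, not char length:
    System.out.println(data.length + " bytes, " + value.length() + " chars");
  }
}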
cluster.shutdownHddsDatanode(datanodes.get(0)); - key.write(data.getBytes()); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); + key.write(data.getBytes(UTF_8)); key.flush(); Assert.assertTrue(keyOutputStream.getExcludeList().getDatanodes() @@ -365,8 +369,8 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception { Assert.assertNotEquals( keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) .getBlockID(), blockId); - Assert.assertEquals(3 * data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.concat(data).concat(data).getBytes()); + Assert.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); + validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8)); } @@ -388,7 +392,7 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { // Assert that 1 block will be preallocated Assert.assertEquals(1, streamEntryList.size()); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); key.flush(); long containerId = streamEntryList.get(0).getBlockID().getContainerID(); BlockID blockId = streamEntryList.get(0).getBlockID(); @@ -405,8 +409,8 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { cluster.shutdownHddsDatanode(datanodes.get(0)); cluster.shutdownHddsDatanode(datanodes.get(1)); - key.write(data.getBytes()); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); + key.write(data.getBytes(UTF_8)); key.flush(); Assert.assertTrue(keyOutputStream.getExcludeList().getPipelineIds() .contains(pipeline.getId())); @@ -428,8 +432,8 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { Assert.assertNotEquals( keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) .getBlockID(), blockId); - Assert.assertEquals(3 * data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.concat(data).concat(data).getBytes()); + Assert.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); + validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8)); } private OzoneOutputStream createKey(String keyName, ReplicationType type, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index 76027f7e295..c7dfacc682f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -55,6 +55,7 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -177,7 +178,7 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { // Assert that 1 block will be preallocated Assert.assertEquals(1, streamEntryList.size()); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); key.flush(); long containerId = streamEntryList.get(0).getBlockID().getContainerID(); BlockID blockId = streamEntryList.get(0).getBlockID(); @@ -194,7 +195,7 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { 
cluster.shutdownHddsDatanode(datanodes.get(0)); cluster.shutdownHddsDatanode(datanodes.get(1)); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); key.flush(); Assert.assertTrue( keyOutputStream.getExcludeList().getContainerIds().isEmpty()); @@ -202,7 +203,7 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { keyOutputStream.getExcludeList().getDatanodes().isEmpty()); Assert.assertTrue( keyOutputStream.getExcludeList().getDatanodes().isEmpty()); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); // The close will just write to the buffer key.close(); @@ -217,8 +218,8 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { Assert.assertNotEquals( keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly().get(0) .getBlockID(), blockId); - Assert.assertEquals(3 * data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.concat(data).concat(data).getBytes()); + Assert.assertEquals(3 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); + validateData(keyName, data.concat(data).concat(data).getBytes(UTF_8)); } private OzoneOutputStream createKey(String keyName, ReplicationType type, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java index e8af69a726e..afe7ae4adaf 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestHybridPipelineOnDatanode.java @@ -42,6 +42,7 @@ import org.junit.Test; import java.io.IOException; +import static java.nio.charset.StandardCharsets.UTF_8; import java.util.Arrays; import java.util.List; import java.util.UUID; @@ -103,7 +104,7 @@ public void testHybridPipelineOnDatanode() throws IOException { String bucketName = UUID.randomUUID().toString(); String value = UUID.randomUUID().toString(); - byte[] data = value.getBytes(); + byte[] data = value.getBytes(UTF_8); objectStore.createVolume(volumeName); OzoneVolume volume = objectStore.getVolume(volumeName); volume.createBucket(bucketName); @@ -114,7 +115,7 @@ public void testHybridPipelineOnDatanode() throws IOException { OzoneOutputStream out = bucket .createKey(keyName1, data.length, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); String keyName2 = UUID.randomUUID().toString(); @@ -123,7 +124,7 @@ public void testHybridPipelineOnDatanode() throws IOException { out = bucket .createKey(keyName2, data.length, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); // We need to find the location of the chunk file corresponding to the diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java index 2cb352de496..85f49776e1b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestKeyInputStream.java @@ -88,18 +88,18 @@ public class TestKeyInputStream { private static final int TIMEOUT = 300_000; - 
private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static OzoneClient client; - private static ObjectStore objectStore; - private static int chunkSize; - private static int flushSize; - private static int maxFlushSize; - private static int blockSize; - private static String volumeName; - private static String bucketName; - private static String keyString; - private static ChunkLayoutTestInfo chunkLayout; + private MiniOzoneCluster cluster; + private OzoneConfiguration conf = new OzoneConfiguration(); + private OzoneClient client; + private ObjectStore objectStore; + private int chunkSize; + private int flushSize; + private int maxFlushSize; + private int blockSize; + private String volumeName; + private String bucketName; + private String keyString; + private ChunkLayoutTestInfo chunkLayout; @Parameterized.Parameters public static Collection layouts() { @@ -471,7 +471,7 @@ private void testReadAfterReplication(boolean doUnbuffer) throws Exception { } } - private static void waitForNodeToBecomeDead( + private void waitForNodeToBecomeDead( DatanodeDetails datanode) throws TimeoutException, InterruptedException { GenericTestUtils.waitFor(() -> HddsProtos.NodeState.DEAD == getNodeHealth(datanode), @@ -480,7 +480,7 @@ private static void waitForNodeToBecomeDead( getNodeHealth(datanode)); } - private static HddsProtos.NodeState getNodeHealth(DatanodeDetails dn) { + private HddsProtos.NodeState getNodeHealth(DatanodeDetails dn) { HddsProtos.NodeState health = null; try { NodeManager nodeManager = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index b435ce98057..39a81ff2f43 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -17,7 +17,6 @@ package org.apache.hadoop.ozone.client.rpc; -import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -53,6 +52,8 @@ import org.junit.Rule; import org.junit.rules.Timeout; + +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.*; /** @@ -147,7 +148,7 @@ public void testMultiBlockWritesWithDnFailures() throws Exception { String data = ContainerTestHelper .getFixedLengthString(keyString, blockSize + chunkSize); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); // get the name of a valid container Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); @@ -169,7 +170,7 @@ public void testMultiBlockWritesWithDnFailures() throws Exception { // The write will fail but exception will be handled and length will be // updated correctly in OzoneManager once the steam is closed - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); key.close(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) @@ -177,8 +178,8 @@ public void testMultiBlockWritesWithDnFailures() throws Exception { .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = 
cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(2 * data.getBytes().length, keyInfo.getDataSize()); - validateData(keyName, data.concat(data).getBytes()); + Assert.assertEquals(2 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); + validateData(keyName, data.concat(data).getBytes(UTF_8)); } @Test @@ -190,7 +191,7 @@ public void testMultiBlockWritesWithIntermittentDnFailures() createKey(keyName, ReplicationType.RATIS, 6 * blockSize); String data = ContainerTestHelper .getFixedLengthString(keyString, blockSize + chunkSize); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); // get the name of a valid container Assert.assertTrue(key.getOutputStream() instanceof KeyOutputStream); @@ -201,10 +202,9 @@ public void testMultiBlockWritesWithIntermittentDnFailures() // Assert that 6 block will be preallocated Assert.assertEquals(6, streamEntryList.size()); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); key.flush(); long containerId = streamEntryList.get(0).getBlockID().getContainerID(); - BlockID blockId = streamEntryList.get(0).getBlockID(); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() .getContainer(ContainerID.valueof(containerId)); @@ -216,11 +216,11 @@ public void testMultiBlockWritesWithIntermittentDnFailures() // The write will fail but exception will be handled and length will be // updated correctly in OzoneManager once the steam is closed - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); // shutdown the second datanode cluster.shutdownHddsDatanode(datanodes.get(1)); - key.write(data.getBytes()); + key.write(data.getBytes(UTF_8)); key.close(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) .setBucketName(bucketName).setType(HddsProtos.ReplicationType.RATIS) @@ -228,9 +228,9 @@ public void testMultiBlockWritesWithIntermittentDnFailures() .setRefreshPipeline(true) .build(); OmKeyInfo keyInfo = cluster.getOzoneManager().lookupKey(keyArgs); - Assert.assertEquals(4 * data.getBytes().length, keyInfo.getDataSize()); + Assert.assertEquals(4 * data.getBytes(UTF_8).length, keyInfo.getDataSize()); validateData(keyName, - data.concat(data).concat(data).concat(data).getBytes()); + data.concat(data).concat(data).concat(data).getBytes(UTF_8)); } private OzoneOutputStream createKey(String keyName, ReplicationType type, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java index 14bce991bdb..6c3c9047c1e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java @@ -66,7 +66,7 @@ public class TestOzoneClientRetriesOnExceptionFlushDelay { @Rule public Timeout timeout = new Timeout(300000); - private static MiniOzoneCluster cluster; + private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); private OzoneClient client; private ObjectStore objectStore; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java similarity index 99% 
rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java index af3ec902523..cd761d21b16 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java @@ -62,7 +62,7 @@ /** * Tests failure detection and handling in BlockOutputStream Class. */ -public class TestOzoneClientRetriesOnException { +public class TestOzoneClientRetriesOnExceptions { private static final int MAX_RETRIES = 3; @@ -72,7 +72,7 @@ public class TestOzoneClientRetriesOnException { @Rule public Timeout timeout = new Timeout(300000); - private static MiniOzoneCluster cluster; + private MiniOzoneCluster cluster; private OzoneConfiguration conf = new OzoneConfiguration(); private OzoneClient client; private ObjectStore objectStore; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index 6c745578b77..4508ade1015 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -285,7 +285,7 @@ public void testSetAndClrQuota() throws Exception { String bucketName = UUID.randomUUID().toString(); String bucketName2 = UUID.randomUUID().toString(); String value = "sample value"; - int valueLength = value.getBytes().length; + int valueLength = value.getBytes(UTF_8).length; OzoneVolume volume = null; store.createVolume(volumeName); @@ -819,19 +819,19 @@ public void testPutKey() throws IOException { String keyName = UUID.randomUUID().toString(); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, STAND_ALONE, + value.getBytes(UTF_8).length, STAND_ALONE, ONE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[value.getBytes().length]; + byte[] fileContent = new byte[value.getBytes(UTF_8).length]; is.read(fileContent); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, STAND_ALONE, ONE)); - Assert.assertEquals(value, new String(fileContent)); + Assert.assertEquals(value, new String(fileContent, UTF_8)); Assert.assertFalse(key.getCreationTime().isBefore(testStartTime)); Assert.assertFalse(key.getModificationTime().isBefore(testStartTime)); } @@ -846,7 +846,7 @@ public void testCheckUsedBytesQuota() throws IOException { String value = "sample value"; int blockSize = (int) ozoneManager.getConfiguration().getStorageSize( OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); - int valueLength = value.getBytes().length; + int valueLength = value.getBytes(UTF_8).length; int countException = 0; store.createVolume(volumeName); @@ -895,7 +895,7 @@ public void testCheckUsedBytesQuota() throws IOException { OzoneOutputStream out = bucket.createKey(UUID.randomUUID().toString(), valueLength, STAND_ALONE, ONE, new HashMap<>()); for 
(int i = 0; i <= (4 * blockSize) / value.length(); i++) { - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); } out.close(); } catch (IOException ex) { @@ -1028,7 +1028,7 @@ private void writeKey(OzoneBucket bucket, String keyName, throws IOException{ OzoneOutputStream out = bucket.createKey(keyName, valueLength, STAND_ALONE, replication, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); } @@ -1037,7 +1037,7 @@ private void writeFile(OzoneBucket bucket, String keyName, throws IOException{ OzoneOutputStream out = bucket.createFile(keyName, valueLength, STAND_ALONE, replication, true, true); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); } @@ -1048,9 +1048,9 @@ public void testUsedBytesWithUploadPart() throws IOException { String keyName = UUID.randomUUID().toString(); int blockSize = (int) ozoneManager.getConfiguration().getStorageSize( OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); - String sampleData = generateData(blockSize + 100, - (byte) RandomUtils.nextLong()).toString(); - int valueLength = sampleData.getBytes().length; + String sampleData = Arrays.toString(generateData(blockSize + 100, + (byte) RandomUtils.nextLong())); + int valueLength = sampleData.getBytes(UTF_8).length; store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); @@ -1096,7 +1096,7 @@ public void testValidateBlockLengthWithCommitKey() throws IOException { // create the initial key with size 0, write will allocate the first block. OzoneOutputStream out = bucket.createKey(keyName, 0, STAND_ALONE, ONE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); builder.setVolumeName(volumeName).setBucketName(bucketName) @@ -1108,10 +1108,10 @@ public void testValidateBlockLengthWithCommitKey() throws IOException { // LocationList should have only 1 block Assert.assertEquals(1, locationInfoList.size()); // make sure the data block size is updated - Assert.assertEquals(value.getBytes().length, + Assert.assertEquals(value.getBytes(UTF_8).length, locationInfoList.get(0).getLength()); // make sure the total data size is set correctly - Assert.assertEquals(value.getBytes().length, keyInfo.getDataSize()); + Assert.assertEquals(value.getBytes(UTF_8).length, keyInfo.getDataSize()); } @Test @@ -1130,19 +1130,19 @@ public void testPutKeyRatisOneNode() throws IOException { String keyName = UUID.randomUUID().toString(); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, + value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[value.getBytes().length]; + byte[] fileContent = new byte[value.getBytes(UTF_8).length]; is.read(fileContent); is.close(); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, ReplicationType.RATIS, ONE)); - Assert.assertEquals(value, new String(fileContent)); + Assert.assertEquals(value, new String(fileContent, UTF_8)); Assert.assertFalse(key.getCreationTime().isBefore(testStartTime)); Assert.assertFalse(key.getModificationTime().isBefore(testStartTime)); } @@ -1164,20 +1164,20 @@ public void testPutKeyRatisThreeNodes() throws IOException { String 
keyName = UUID.randomUUID().toString(); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, + value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[value.getBytes().length]; + byte[] fileContent = new byte[value.getBytes(UTF_8).length]; is.read(fileContent); is.close(); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, ReplicationType.RATIS, THREE)); - Assert.assertEquals(value, new String(fileContent)); + Assert.assertEquals(value, new String(fileContent, UTF_8)); Assert.assertFalse(key.getCreationTime().isBefore(testStartTime)); Assert.assertFalse(key.getModificationTime().isBefore(testStartTime)); } @@ -1202,23 +1202,23 @@ public void testPutKeyRatisThreeNodesParallel() throws IOException, try { for (int i = 0; i < 5; i++) { String keyName = UUID.randomUUID().toString(); - String data = generateData(5 * 1024 * 1024, - (byte) RandomUtils.nextLong()).toString(); + String data = Arrays.toString(generateData(5 * 1024 * 1024, + (byte) RandomUtils.nextLong())); OzoneOutputStream out = bucket.createKey(keyName, - data.getBytes().length, ReplicationType.RATIS, + data.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>()); - out.write(data.getBytes()); + out.write(data.getBytes(UTF_8)); out.close(); OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[data.getBytes().length]; + byte[] fileContent = new byte[data.getBytes(UTF_8).length]; is.read(fileContent); is.close(); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, ReplicationType.RATIS, THREE)); - Assert.assertEquals(data, new String(fileContent)); + Assert.assertEquals(data, new String(fileContent, UTF_8)); Assert.assertFalse(key.getCreationTime().isBefore(testStartTime)); Assert.assertFalse(key.getModificationTime().isBefore(testStartTime)); } @@ -1283,9 +1283,9 @@ private void createAndCorruptKey(String volumeName, String bucketName, // Write data into a key OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, + value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); // We need to find the location of the chunk file corresponding to the @@ -1342,7 +1342,7 @@ private void readKey(OzoneBucket bucket, String keyName, String data) OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[data.getBytes().length]; + byte[] fileContent = new byte[data.getBytes(UTF_8).length]; is.read(fileContent); is.close(); } @@ -1361,9 +1361,9 @@ public void testGetKeyDetails() throws IOException { //String keyValue = "this is a test value.glx"; // create the initial key with size 0, write will allocate the first block. 
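In the testUsedBytesWithUploadPart and testPutKeyRatisThreeNodesParallel hunks above, generateData(...).toString() becomes Arrays.toString(generateData(...)). Arrays do not override Object.toString(), so the old call produced only a type tag plus identity hash (something like [B@7852e922) — a short, size-independent string — rather than the generated contents, while Arrays.toString renders the elements, so the payload actually scales with the requested size. A minimal sketch, assuming (as the new call implies) that generateData returns a byte[]:

import java.util.Arrays;

public final class ArrayToStringDemo {
  public static void main(String[] args) {
    byte[] data = {1, 2, 3};
    System.out.println(data.toString());        // e.g. "[B@7852e922" — identity, not contents
    System.out.println(Arrays.toString(data));  // "[1, 2, 3]" — grows with the array
  }
}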
OzoneOutputStream out = bucket.createKey(keyName, - keyValue.getBytes().length, STAND_ALONE, + keyValue.getBytes(UTF_8).length, STAND_ALONE, ONE, new HashMap<>()); - out.write(keyValue.getBytes()); + out.write(keyValue.getBytes(UTF_8)); out.close(); OzoneInputStream is = bucket.readKey(keyName); @@ -1387,7 +1387,7 @@ public void testGetKeyDetails() throws IOException { Assert.assertEquals(localID, keyLocations.get(0).getLocalID()); // Make sure that the data size matched. - Assert.assertEquals(keyValue.getBytes().length, + Assert.assertEquals(keyValue.getBytes(UTF_8).length, keyLocations.get(0).getLength()); // Second, sum the data size from chunks in Container via containerID @@ -1424,7 +1424,7 @@ public void testGetKeyDetails() throws IOException { for (ContainerProtos.ChunkInfo chunk : chunks) { length += chunk.getLen(); } - Assert.assertEquals(length, keyValue.getBytes().length); + Assert.assertEquals(length, keyValue.getBytes(UTF_8).length); break; } } @@ -1449,9 +1449,9 @@ public void testReadKeyWithCorruptedData() throws IOException { // Write data into a key OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, + value.getBytes(UTF_8).length, ReplicationType.RATIS, ONE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); // We need to find the location of the chunk file corresponding to the @@ -1494,7 +1494,7 @@ public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { String bucketName = UUID.randomUUID().toString(); String value = "sample value"; - byte[] data = value.getBytes(); + byte[] data = value.getBytes(UTF_8); store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); volume.createBucket(bucketName); @@ -1503,9 +1503,9 @@ public void testReadKeyWithCorruptedDataWithMutiNodes() throws IOException { // Write data into a key OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, + value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); // We need to find the location of the chunk file corresponding to the @@ -1595,7 +1595,7 @@ private void corruptData(Container container, OzoneKey key) container.getContainerData().getVolume().getHddsRootDir().getPath(); File chunksLocationPath = KeyValueContainerLocationUtil .getChunksLocationPath(containreBaseDir, scmId, containerID); - byte[] corruptData = "corrupted data".getBytes(); + byte[] corruptData = "corrupted data".getBytes(UTF_8); // Corrupt the contents of chunk files for (File file : FileUtils.listFiles(chunksLocationPath, null, false)) { FileUtils.writeByteArrayToFile(file, corruptData); @@ -1616,9 +1616,9 @@ public void testDeleteKey() volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, STAND_ALONE, + value.getBytes(UTF_8).length, STAND_ALONE, ONE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); @@ -1890,7 +1890,7 @@ public void testListKey() */ String keyBaseA = "key-a-"; for (int i = 0; i < 10; i++) { - byte[] value = RandomStringUtils.randomAscii(10240).getBytes(); + byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8); OzoneOutputStream one = volAbucketA.createKey( keyBaseA + i + "-" + 
RandomStringUtils.randomNumeric(5), value.length, STAND_ALONE, ONE, @@ -1923,7 +1923,7 @@ public void testListKey() */ String keyBaseB = "key-b-"; for (int i = 0; i < 10; i++) { - byte[] value = RandomStringUtils.randomAscii(10240).getBytes(); + byte[] value = RandomStringUtils.randomAscii(10240).getBytes(UTF_8); OzoneOutputStream one = volAbucketA.createKey( keyBaseB + i + "-" + RandomStringUtils.randomNumeric(5), value.length, STAND_ALONE, ONE, @@ -2482,9 +2482,9 @@ public void testCommitPartAfterCompleteUpload() throws Exception { StringBuilder sb = new StringBuilder(data.length); // Combine all parts data, and check is it matching with get key data. - String part1 = new String(data); + String part1 = new String(data, UTF_8); sb.append(part1); - Assert.assertEquals(sb.toString(), new String(fileContent)); + Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8)); try { ozoneOutputStream.close(); @@ -3059,7 +3059,7 @@ private void validateOzoneAccessAcl(OzoneObj ozObj) throws IOException { private void writeKey(String key1, OzoneBucket bucket) throws IOException { OzoneOutputStream out = bucket.createKey(key1, 1024, STAND_ALONE, ONE, new HashMap<>()); - out.write(RandomStringUtils.random(1024).getBytes()); + out.write(RandomStringUtils.random(1024).getBytes(UTF_8)); out.close(); } @@ -3116,12 +3116,12 @@ private void doMultipartUpload(OzoneBucket bucket, String keyName, byte val) StringBuilder sb = new StringBuilder(length); // Combine all parts data, and check is it matching with get key data. - String part1 = new String(data); - String part2 = new String(data); + String part1 = new String(data, UTF_8); + String part2 = new String(data, UTF_8); sb.append(part1); sb.append(part2); sb.append(part3); - Assert.assertEquals(sb.toString(), new String(fileContent)); + Assert.assertEquals(sb.toString(), new String(fileContent, UTF_8)); } @@ -3170,9 +3170,9 @@ private void completeMultipartUpload(OzoneBucket bucket, String keyName, private void createTestKey(OzoneBucket bucket, String keyName, String keyValue) throws IOException { OzoneOutputStream out = bucket.createKey(keyName, - keyValue.getBytes().length, STAND_ALONE, + keyValue.getBytes(UTF_8).length, STAND_ALONE, ONE, new HashMap<>()); - out.write(keyValue.getBytes()); + out.write(keyValue.getBytes(UTF_8)); out.close(); OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); @@ -3229,8 +3229,8 @@ public void testKeyReadWriteForGDPR() throws Exception { Map keyMetadata = new HashMap<>(); keyMetadata.put(OzoneConsts.GDPR_FLAG, "true"); OzoneOutputStream out = bucket.createKey(keyName, - text.getBytes().length, STAND_ALONE, ONE, keyMetadata); - out.write(text.getBytes()); + text.getBytes(UTF_8).length, STAND_ALONE, ONE, keyMetadata); + out.write(text.getBytes(UTF_8)); out.close(); Assert.assertNull(keyMetadata.get(OzoneConsts.GDPR_SECRET)); @@ -3244,12 +3244,12 @@ public void testKeyReadWriteForGDPR() throws Exception { Assert.assertNotNull(key.getMetadata().get(OzoneConsts.GDPR_SECRET)); OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[text.getBytes().length]; + byte[] fileContent = new byte[text.getBytes(UTF_8).length]; is.read(fileContent); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, STAND_ALONE, ONE)); - Assert.assertEquals(text, new String(fileContent)); + Assert.assertEquals(text, new String(fileContent, UTF_8)); //Step 4 OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); @@ -3267,11 +3267,11 @@ public void 
testKeyReadWriteForGDPR() throws Exception { Assert.assertEquals(keyName, key.getName()); Assert.assertNull(key.getMetadata().get(OzoneConsts.GDPR_FLAG)); is = bucket.readKey(keyName); - fileContent = new byte[text.getBytes().length]; + fileContent = new byte[text.getBytes(UTF_8).length]; is.read(fileContent); //Step 6 - Assert.assertNotEquals(text, new String(fileContent)); + Assert.assertNotEquals(text, new String(fileContent, UTF_8)); } @@ -3310,8 +3310,8 @@ public void testDeletedKeyForGDPR() throws Exception { Map keyMetadata = new HashMap<>(); keyMetadata.put(OzoneConsts.GDPR_FLAG, "true"); OzoneOutputStream out = bucket.createKey(keyName, - text.getBytes().length, STAND_ALONE, ONE, keyMetadata); - out.write(text.getBytes()); + text.getBytes(UTF_8).length, STAND_ALONE, ONE, keyMetadata); + out.write(text.getBytes(UTF_8)); out.close(); //Step 3 @@ -3324,12 +3324,12 @@ public void testDeletedKeyForGDPR() throws Exception { Assert.assertTrue(key.getMetadata().get(OzoneConsts.GDPR_SECRET) != null); OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[text.getBytes().length]; + byte[] fileContent = new byte[text.getBytes(UTF_8).length]; is.read(fileContent); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, STAND_ALONE, ONE)); - Assert.assertEquals(text, new String(fileContent)); + Assert.assertEquals(text, new String(fileContent, UTF_8)); //Step 4 bucket.deleteKey(keyName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index 10400b3ef98..466414a1825 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -42,6 +42,7 @@ import org.junit.BeforeClass; import org.junit.Test; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; import static org.junit.Assert.fail; @@ -97,9 +98,9 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { // Write data into a key try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, + value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>())) { - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); } // Since the rpc client is outside of cluster, then getFirstNode should be @@ -110,18 +111,18 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { // read key with topology aware read enabled try (OzoneInputStream is = bucket.readKey(keyName)) { - byte[] b = new byte[value.getBytes().length]; + byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes())); + Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); } catch (OzoneChecksumException e) { fail("Read key should succeed"); } // read file with topology aware read enabled try (OzoneInputStream is = bucket.readKey(keyName)) { - byte[] b = new byte[value.getBytes().length]; + byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes())); + Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); } catch (OzoneChecksumException e) { fail("Read file should succeed"); } @@ -134,18 
+135,18 @@ public void testGetKeyAndFileWithNetworkTopology() throws IOException { OzoneBucket newBucket = newStore.getVolume(volumeName).getBucket(bucketName); try (OzoneInputStream is = newBucket.readKey(keyName)) { - byte[] b = new byte[value.getBytes().length]; + byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes())); + Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); } catch (OzoneChecksumException e) { fail("Read key should succeed"); } // read file with topology aware read disabled try (OzoneInputStream is = newBucket.readFile(keyName)) { - byte[] b = new byte[value.getBytes().length]; + byte[] b = new byte[value.getBytes(UTF_8).length]; is.read(b); - Assert.assertTrue(Arrays.equals(b, value.getBytes())); + Assert.assertTrue(Arrays.equals(b, value.getBytes(UTF_8))); } catch (OzoneChecksumException e) { fail("Read file should succeed"); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java index 914845931df..af951909a74 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java @@ -53,6 +53,8 @@ import org.junit.AfterClass; import org.junit.Assert; + +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.fail; import org.junit.BeforeClass; import org.junit.Rule; @@ -141,12 +143,12 @@ public void testPutKeyAndGetKeyThreeNodes() String keyName = UUID.randomUUID().toString(); OzoneOutputStream out = bucket - .createKey(keyName, value.getBytes().length, ReplicationType.RATIS, + .createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>()); KeyOutputStream groupOutputStream = (KeyOutputStream) out.getOutputStream(); XceiverClientFactory factory = groupOutputStream.getXceiverClientFactory(); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); // First, confirm the key info from the client matches the info in OM. OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); @@ -165,8 +167,8 @@ public void testPutKeyAndGetKeyThreeNodes() Assert.assertEquals(localID, keyLocations.get(0).getLocalID()); // Make sure that the data size matched. 
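The read-back pattern repeated above — allocate byte[value.getBytes(UTF_8).length], call is.read(b) once, then compare — leans on a loose part of the InputStream contract: read(byte[]) only promises at least one byte, not a full buffer. These tests get away with a single call because the payloads are small, but DataInputStream.readFully (or a read loop) states the intent explicitly. A sketch with a ByteArrayInputStream standing in for the Ozone streams:

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Arrays;
import static java.nio.charset.StandardCharsets.UTF_8;

public final class ReadFullyDemo {
  public static void main(String[] args) throws IOException {
    byte[] expected = "sample value".getBytes(UTF_8);
    try (DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(expected))) {
      byte[] actual = new byte[expected.length];
      in.readFully(actual);  // fills the whole buffer or throws EOFException,
                             // unlike read(byte[]), which may return early
      System.out.println(Arrays.equals(expected, actual));  // true
    }
  }
}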
- Assert - .assertEquals(value.getBytes().length, keyLocations.get(0).getLength()); + Assert.assertEquals(value.getBytes(UTF_8).length, + keyLocations.get(0).getLength()); ContainerInfo container = cluster.getStorageContainerManager() .getContainerManager().getContainer(ContainerID.valueof(containerID)); @@ -213,7 +215,7 @@ private void readKey(OzoneBucket bucket, String keyName, String data) OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[data.getBytes().length]; + byte[] fileContent = new byte[data.getBytes(UTF_8).length]; is.read(fileContent); is.close(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java index 72ce91af675..f3ff90a0713 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestSecureOzoneRpcClient.java @@ -62,8 +62,7 @@ import java.util.HashMap; import java.util.UUID; -import org.junit.Rule; -import org.junit.rules.Timeout; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; /** @@ -71,12 +70,6 @@ */ public class TestSecureOzoneRpcClient extends TestOzoneRpcClient { - /** - * Set a timeout for each test. - */ - @Rule - public Timeout timeout = new Timeout(300000); - private static MiniOzoneCluster cluster = null; private static OzoneClient ozClient = null; private static ObjectStore store = null; @@ -159,23 +152,23 @@ public void testPutKeySuccessWithBlockToken() throws Exception { String keyName = UUID.randomUUID().toString(); try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.STAND_ALONE, + value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>())) { - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); } OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); byte[] fileContent; try(OzoneInputStream is = bucket.readKey(keyName)) { - fileContent = new byte[value.getBytes().length]; + fileContent = new byte[value.getBytes(UTF_8).length]; is.read(fileContent); } Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, ReplicationType.STAND_ALONE, ReplicationFactor.ONE)); - Assert.assertEquals(value, new String(fileContent)); + Assert.assertEquals(value, new String(fileContent, UTF_8)); Assert.assertFalse(key.getCreationTime().isBefore(testStartTime)); Assert.assertFalse(key.getModificationTime().isBefore(testStartTime)); } @@ -203,11 +196,11 @@ public void testKeyOpFailureWithoutBlockToken() throws Exception { String keyName = UUID.randomUUID().toString(); try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.STAND_ALONE, + value.getBytes(UTF_8).length, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>())) { LambdaTestUtils.intercept(IOException.class, "UNAUTHENTICATED: Fail " + "to find any token ", - () -> out.write(value.getBytes())); + () -> out.write(value.getBytes(UTF_8))); } OzoneKey key = bucket.getKey(keyName); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java index 483823c78d1..430f2432173 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestValidateBCSIDOnRestart.java @@ -53,12 +53,12 @@ import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import org.apache.ratis.server.storage.FileInfo; import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; import org.junit.AfterClass; import org.junit.Assert; @@ -152,9 +152,9 @@ public void testValidateBCSIDOnDnRestart() throws Exception { .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); // First write and flush creates a container in the datanode - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); key.flush(); - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); KeyOutputStream groupOutputStream = (KeyOutputStream) key.getOutputStream(); List locationInfoList = groupOutputStream.getLocationInfoList(); @@ -195,7 +195,6 @@ public void testValidateBCSIDOnDnRestart() throws Exception { // Since the snapshot threshold is set to 1, since there are // applyTransactions, we should see snapshots Assert.assertTrue(parentPath.getParent().toFile().listFiles().length > 0); - FileInfo snapshot = storage.findLatestSnapshot().getFile(); // make sure the missing containerSet is not empty HddsDispatcher dispatcher = (HddsDispatcher) ozoneContainer.getDispatcher(); @@ -208,7 +207,7 @@ public void testValidateBCSIDOnDnRestart() throws Exception { .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); // First write and flush creates a container in the datanode - key.write("ratis1".getBytes()); + key.write("ratis1".getBytes(UTF_8)); key.flush(); groupOutputStream = (KeyOutputStream) key.getOutputStream(); locationInfoList = groupOutputStream.getLocationInfoList(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java index 5e5ce74f216..c9fcd316e4f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java @@ -72,18 +72,18 @@ */ public class TestWatchForCommit { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static OzoneClient client; - private static ObjectStore objectStore; - private static String volumeName; - private static String bucketName; - private static String keyString; - private static int chunkSize; - private static int flushSize; - private static int maxFlushSize; - private static int blockSize; 
- private static StorageContainerLocationProtocolClientSideTranslatorPB + private MiniOzoneCluster cluster; + private OzoneConfiguration conf; + private OzoneClient client; + private ObjectStore objectStore; + private String volumeName; + private String bucketName; + private String keyString; + private int chunkSize; + private int flushSize; + private int maxFlushSize; + private int blockSize; + private StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index 428491ab79c..c8811c17d63 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -80,6 +80,7 @@ import java.util.concurrent.TimeoutException; import static java.lang.Math.max; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL; import static org.apache.hadoop.hdds .HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; @@ -169,10 +170,11 @@ public void testBlockDeletion() throws Exception { String keyName = UUID.randomUUID().toString(); - OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length, - ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>()); + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(UTF_8).length, ReplicationType.RATIS, + ReplicationFactor.THREE, new HashMap<>()); for (int i = 0; i < 100; i++) { - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); } out.close(); @@ -252,9 +254,10 @@ public void testContainerStatisticsAfterDelete() throws Exception { OzoneBucket bucket = volume.getBucket(bucketName); String keyName = UUID.randomUUID().toString(); - OzoneOutputStream out = bucket.createKey(keyName, value.getBytes().length, - ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>()); - out.write(value.getBytes()); + OzoneOutputStream out = bucket.createKey(keyName, + value.getBytes(UTF_8).length, ReplicationType.RATIS, + ReplicationFactor.THREE, new HashMap<>()); + out.write(value.getBytes(UTF_8)); out.close(); OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName) @@ -268,7 +271,7 @@ public void testContainerStatisticsAfterDelete() throws Exception { Thread.sleep(5000); List containerInfos = scm.getContainerManager().getContainers(); - final int valueSize = value.getBytes().length; + final int valueSize = value.getBytes(UTF_8).length; final int keyCount = 1; containerInfos.stream().forEach(container -> { Assert.assertEquals(valueSize, container.getUsedBytes()); @@ -360,8 +363,7 @@ private void waitForDatanodeCommandRetry() } private void verifyTransactionsCommitted() throws IOException { - DeletedBlockLogImpl deletedBlockLog = - (DeletedBlockLogImpl) scm.getScmBlockManager().getDeletedBlockLog(); + scm.getScmBlockManager().getDeletedBlockLog(); for (long txnID = 1; txnID <= maxTransactionId; txnID++) { Assert.assertNull( scm.getScmMetadataStore().getDeletedBlocksTXTable().get(txnID)); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index bb2d57f2224..31095956707 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -53,6 +53,7 @@ import java.util.List; import java.util.concurrent.TimeoutException; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; /** @@ -107,7 +108,7 @@ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception { OzoneOutputStream key = objectStore.getVolume("test").getBucket("test") .createKey(keyName, 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); - key.write(keyName.getBytes()); + key.write(keyName.getBytes(UTF_8)); key.close(); //get the name of a valid container @@ -161,7 +162,7 @@ public void testCloseContainerViaStandAlone() OzoneOutputStream key = objectStore.getVolume("test").getBucket("test") .createKey("standalone", 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); - key.write("standalone".getBytes()); + key.write("standalone".getBytes(UTF_8)); key.close(); //get the name of a valid container @@ -217,7 +218,7 @@ public void testCloseContainerViaRatis() throws IOException, OzoneOutputStream key = objectStore.getVolume("test").getBucket("test") .createKey("ratis", 1024, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>()); - key.write("ratis".getBytes()); + key.write("ratis".getBytes(UTF_8)); key.close(); //get the name of a valid container @@ -278,7 +279,7 @@ public void testQuasiCloseTransitionViaRatis() OzoneOutputStream key = objectStore.getVolume("test").getBucket("test") .createKey(keyName, 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); - key.write(keyName.getBytes()); + key.write(keyName.getBytes(UTF_8)); key.close(); OmKeyArgs keyArgs = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java index 831c7291536..2aa72560f27 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java @@ -39,6 +39,7 @@ import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.test.GenericTestUtils; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; import org.junit.After; @@ -91,7 +92,7 @@ public void test() throws Exception { OzoneOutputStream key = objectStore.getVolume("test").getBucket("test") .createKey("test", 1024, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>()); - key.write("test".getBytes()); + key.write("test".getBytes(UTF_8)); 
key.close(); //get the name of a valid container diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java index 28b58d9da02..9440fc7828c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java @@ -51,6 +51,8 @@ import org.junit.Rule; import org.junit.rules.Timeout; + +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; /** @@ -228,7 +230,7 @@ private void createKey(String keyName) throws IOException { .getBucket(bucketName) .createKey(keyName, 1024, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>()); - key.write("test".getBytes()); + key.write("test".getBytes(UTF_8)); key.close(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java index e6fae22efbf..4911f950671 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/TestCSMMetrics.java @@ -49,7 +49,6 @@ import org.apache.hadoop.ozone.container.common.transport.server .XceiverServerSpi; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -97,7 +96,6 @@ static void runContainerStateMachineMetrics( throws Exception { final List servers = new ArrayList<>(); XceiverClientSpi client = null; - String containerName = OzoneUtils.getRequestID(); try { final Pipeline pipeline = MockPipeline.createPipeline(numDatanodes); final OzoneConfiguration conf = new OzoneConfiguration(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java index bbb823265af..af6785341a0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java @@ -58,7 +58,6 @@ import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; -import org.apache.hadoop.ozone.web.utils.OzoneUtils; import org.apache.hadoop.test.GenericTestUtils; import com.google.common.collect.Maps; @@ -93,9 +92,6 @@ public static void setup() { @Test public void testClientServer() throws Exception { DatanodeDetails datanodeDetails = randomDatanodeDetails(); - ContainerSet containerSet = new ContainerSet(); - ContainerController 
controller = new ContainerController( - containerSet, null); runTestClientServer(1, (pipeline, conf) -> conf .setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getFirstNode() @@ -156,7 +152,6 @@ static void runTestClientServer( throws Exception { final List servers = new ArrayList<>(); XceiverClientSpi client = null; - String containerName = OzoneUtils.getRequestID(); try { final Pipeline pipeline = MockPipeline.createPipeline(numDatanodes); @@ -178,7 +173,7 @@ static void runTestClientServer( ContainerTestHelper.getTestContainerID(), pipeline); Assert.assertNotNull(request.getTraceID()); - ContainerCommandResponseProto response = client.sendCommand(request); + client.sendCommand(request); } finally { if (client != null) { client.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java index acede34110e..2fdcf39200d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java @@ -131,9 +131,6 @@ public void cleanUp() { @Test public void testClientServer() throws Exception { DatanodeDetails dd = MockDatanodeDetails.randomDatanodeDetails(); - ContainerSet containerSet = new ContainerSet(); - ContainerController controller = new ContainerController( - containerSet, null); HddsDispatcher hddsDispatcher = createDispatcher(dd, UUID.randomUUID(), CONF); runTestClientServer(1, (pipeline, conf) -> conf diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java index d6c2cd498b2..dbe9598ff9c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/ratis/TestDnRatisLogParser.java @@ -20,6 +20,7 @@ import java.io.ByteArrayOutputStream; import java.io.File; import java.io.PrintStream; +import java.nio.charset.StandardCharsets; import java.util.UUID; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -35,6 +36,8 @@ import org.junit.Test; import org.junit.rules.Timeout; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Test Datanode Ratis log parser. 
*/ @@ -46,7 +49,7 @@ public class TestDnRatisLogParser { @Rule public Timeout timeout = new Timeout(300000); - private static MiniOzoneCluster cluster = null; + private MiniOzoneCluster cluster = null; private final ByteArrayOutputStream out = new ByteArrayOutputStream(); private final ByteArrayOutputStream err = new ByteArrayOutputStream(); @@ -56,8 +59,8 @@ public void setup() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(1).setTotalPipelineNumLimit(2).build(); cluster.waitForClusterToBeReady(); - System.setOut(new PrintStream(out)); - System.setErr(new PrintStream(err)); + System.setOut(new PrintStream(out, false, UTF_8.name())); + System.setErr(new PrintStream(err, false, UTF_8.name())); } @After @@ -89,6 +92,7 @@ public void testRatisLogParsing() throws Exception { datanodeRatisLogParser.setSegmentFile(logFile); datanodeRatisLogParser.parseRatisLogs( DatanodeRatisLogParser::smToContainerLogString); - Assert.assertTrue(out.toString().contains("Num Total Entries:")); + Assert.assertTrue(out.toString(StandardCharsets.UTF_8.name()) + .contains("Num Total Entries:")); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java index 631d9448ce0..1c856833a23 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java @@ -66,6 +66,8 @@ import org.junit.Rule; import org.junit.rules.Timeout; + +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.client.ReplicationType.STAND_ALONE; @@ -134,19 +136,19 @@ public void testOpenContainerIntegrity() throws Exception { String keyName = UUID.randomUUID().toString(); OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, STAND_ALONE, + value.getBytes(UTF_8).length, STAND_ALONE, ONE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); OzoneKey key = bucket.getKey(keyName); Assert.assertEquals(keyName, key.getName()); OzoneInputStream is = bucket.readKey(keyName); - byte[] fileContent = new byte[value.getBytes().length]; + byte[] fileContent = new byte[value.getBytes(UTF_8).length]; is.read(fileContent); Assert.assertTrue(verifyRatisReplication(volumeName, bucketName, keyName, STAND_ALONE, ONE)); - Assert.assertEquals(value, new String(fileContent)); + Assert.assertEquals(value, new String(fileContent, UTF_8)); Assert.assertFalse(key.getCreationTime().isBefore(testStartTime)); Assert.assertFalse(key.getModificationTime().isBefore(testStartTime)); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java index 7f049a3f658..a1fa79a994b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java @@ -47,6 +47,8 @@ import java.util.HashMap; import java.util.Set; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * This class tests container report with DN container state info. 
*/ @@ -107,7 +109,7 @@ public void testContainerReportKeyWrite() throws Exception { .createKey(keyName, keySize, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>()); String dataString = RandomStringUtils.randomAlphabetic(keySize); - key.write(dataString.getBytes()); + key.write(dataString.getBytes(UTF_8)); key.close(); OmKeyArgs keyArgs = new OmKeyArgs.Builder() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index e3f1c67ebe3..1a5954c8f64 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -305,13 +305,15 @@ public void openKeyWithMultipleBlocks() throws IOException { @Test public void testCreateDirectory() throws IOException { // Create directory where the parent directory does not exist - String keyName = RandomStringUtils.randomAlphabetic(5); + StringBuffer keyNameBuf = new StringBuffer(); + keyNameBuf.append(RandomStringUtils.randomAlphabetic(5)); OmKeyArgs keyArgs = createBuilder() - .setKeyName(keyName) + .setKeyName(keyNameBuf.toString()) .build(); for (int i =0; i< 5; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); + keyNameBuf.append("/").append(RandomStringUtils.randomAlphabetic(5)); } + String keyName = keyNameBuf.toString(); keyManager.createDirectory(keyArgs); Path path = Paths.get(keyName); while (path != null) { @@ -329,9 +331,6 @@ public void testCreateDirectory() throws IOException { keyArgs.setLocationInfoList( keySession.getKeyInfo().getLatestVersionLocations().getLocationList()); keyManager.commitKey(keyArgs, keySession.getId()); - for (int i =0; i< 5; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } try { keyManager.createDirectory(keyArgs); Assert.fail("Creation should fail for directory."); @@ -387,10 +386,12 @@ public void testOpenFile() throws IOException { // try to create a file where parent directories do not exist and // recursive flag is set to false - keyName = RandomStringUtils.randomAlphabetic(5); + StringBuffer keyNameBuf = new StringBuffer(); + keyNameBuf.append(RandomStringUtils.randomAlphabetic(5)); for (int i =0; i< 5; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); + keyNameBuf.append("/").append(RandomStringUtils.randomAlphabetic(5)); } + keyName = keyNameBuf.toString(); keyArgs = createBuilder() .setKeyName(keyName) .build(); @@ -612,7 +613,6 @@ public void testInvalidPrefixAcl() throws IOException { // get acl with invalid prefix name exception.expect(OMException.class); exception.expectMessage("Invalid prefix name"); - ozAclGet = prefixManager.getAcl(ozInvalidPrefix); Assert.assertEquals(null, ozAcl1); // set acl with invalid prefix name diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java index e4b6b53694b..2b4b471aebc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java @@ -58,9 +58,9 @@ public class TestKeyPurging { @Rule public Timeout timeout = new Timeout(300000); - private static MiniOzoneCluster cluster; - private static ObjectStore store; - private static OzoneManager 
om; + private MiniOzoneCluster cluster; + private ObjectStore store; + private OzoneManager om; private static final int NUM_KEYS = 10; private static final int KEY_SIZE = 100; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java index 2c66885e353..2a52c96eaf7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java @@ -27,8 +27,9 @@ import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; -import java.io.FileWriter; import java.io.IOException; +import java.io.OutputStreamWriter; +import java.nio.charset.StandardCharsets; import java.nio.file.Path; import java.nio.file.Paths; import java.util.UUID; @@ -191,12 +192,14 @@ public void testWriteCheckpointToOutputStream() throws Exception { try { String testDirName = folder.newFolder().getAbsolutePath(); File file = new File(testDirName + "/temp1.txt"); - FileWriter writer = new FileWriter(file); + OutputStreamWriter writer = new OutputStreamWriter( + new FileOutputStream(file), StandardCharsets.UTF_8); writer.write("Test data 1"); writer.close(); file = new File(testDirName + "/temp2.txt"); - writer = new FileWriter(file); + writer = new OutputStreamWriter( + new FileOutputStream(file), StandardCharsets.UTF_8); writer.write("Test data 2"); writer.close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java index f6f017fea50..19fea854418 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java @@ -43,6 +43,7 @@ import org.junit.Test; import org.junit.rules.Timeout; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OmUtils.EPOCH_ID_SHIFT; import static org.apache.hadoop.ozone.OmUtils.EPOCH_WHEN_RATIS_NOT_ENABLED; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; @@ -135,7 +136,7 @@ public void testUniqueTrxnIndexOnOMRestart() throws Exception { OzoneOutputStream ozoneOutputStream = ozoneVolume.getBucket(bucketName) .createKey(keyName, data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); - ozoneOutputStream.write(data.getBytes(), 0, data.length()); + ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); ozoneOutputStream.close(); // Verify last transactionIndex is updated after key creation and the diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java index 54994d8d718..72bc510d01f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java @@ -260,7 +260,6 @@ public void testInstallCorruptedCheckpointFailure() throws Exception { followerNodeId = leaderOM.getPeerNodes().get(1).getOMNodeId(); } OzoneManager followerOM = cluster.getOzoneManager(followerNodeId); - 
OzoneManagerRatisServer followerRatisServer = followerOM.getOmRatisServer(); // Do some transactions so that the log index increases writeKeysToIncreaseLogIndex(leaderRatisServer, 100); @@ -311,7 +310,7 @@ private List writeKeysToIncreaseLogIndex( return keys; } - private class DummyExitManager extends ExitManager { + private static class DummyExitManager extends ExitManager { @Override public void exitSystem(int status, String message, Throwable throwable, Logger log) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index e82fae1171d..ce673e00aeb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -30,7 +30,6 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; import org.apache.hadoop.ozone.om.helpers.OpenKeySession; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.commons.lang3.RandomStringUtils; import org.junit.AfterClass; @@ -87,9 +86,6 @@ public static void shutdown() { @Test public void testAllocateCommit() throws Exception { - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - String userName = ugi.getUserName(); - String adminName = ugi.getUserName(); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); String keyName = "key" + RandomStringUtils.randomNumeric(5); @@ -180,8 +176,6 @@ private OmKeyLocationInfoGroup checkVersions( @Test public void testReadLatestVersion() throws Exception { - String userName = "user" + RandomStringUtils.randomNumeric(5); - String adminName = "admin" + RandomStringUtils.randomNumeric(5); String volumeName = "volume" + RandomStringUtils.randomNumeric(5); String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); String keyName = "key" + RandomStringUtils.randomNumeric(5); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java index 42f81269078..ea39a1f77c2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java @@ -37,6 +37,7 @@ import java.util.List; import java.util.ArrayList; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.ClientVersions.CURRENT_VERSION; @@ -49,7 +50,7 @@ public class TestOmLDBCli { private RDBParser rdbParser; private DBScanner dbScanner; private DBStore dbStore = null; - private static List keyNames; + private List keyNames; @Rule public TemporaryFolder folder = new TemporaryFolder(); @@ -89,7 +90,7 @@ public void testOMDB() throws Exception { String key = "key"+ (i); Table keyTable = dbStore.getTable("keyTable"); byte[] arr = value.getProtobuf(CURRENT_VERSION).toByteArray(); - keyTable.put(key.getBytes(), arr); + keyTable.put(key.getBytes(UTF_8), arr); } rdbParser.setDbPath(dbStore.getDbLocation().getAbsolutePath()); dbScanner.setParent(rdbParser); @@ -108,13 +109,13 @@ public void testOMDB() throws Exception { } } - private static List getKeyNames(DBScanner dbScanner) + private 
List getKeyNames(DBScanner scanner) throws Exception { keyNames.clear(); - dbScanner.setTableName("keyTable"); - dbScanner.call(); - Assert.assertFalse(dbScanner.getScannedObjects().isEmpty()); - for (Object o : dbScanner.getScannedObjects()){ + scanner.setTableName("keyTable"); + scanner.call(); + Assert.assertFalse(scanner.getScannedObjects().isEmpty()); + for (Object o : scanner.getScannedObjects()){ OmKeyInfo keyInfo = (OmKeyInfo)o; keyNames.add(keyInfo.getKeyName()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index effe32f83cc..d60366cbc58 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -51,6 +51,7 @@ import java.util.UUID; import java.util.HashMap; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; @@ -194,7 +195,7 @@ static String createKey(OzoneBucket ozoneBucket) throws IOException { OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, data.length(), ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>()); - ozoneOutputStream.write(data.getBytes(), 0, data.length()); + ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); ozoneOutputStream.close(); return keyName; } @@ -304,7 +305,7 @@ protected void testCreateFile(OzoneBucket ozoneBucket, String keyName, data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, overwrite, recursive); - ozoneOutputStream.write(data.getBytes(), 0, data.length()); + ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); ozoneOutputStream.close(); OzoneKeyDetails ozoneKeyDetails = ozoneBucket.getKey(keyName); @@ -317,9 +318,9 @@ protected void testCreateFile(OzoneBucket ozoneBucket, String keyName, OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName); - byte[] fileContent = new byte[data.getBytes().length]; + byte[] fileContent = new byte[data.getBytes(UTF_8).length]; ozoneInputStream.read(fileContent); - Assert.assertEquals(data, new String(fileContent)); + Assert.assertEquals(data, new String(fileContent, UTF_8)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java index 3a3d82be9d0..f340c92f054 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java @@ -45,6 +45,7 @@ import java.util.Map; import java.util.UUID; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl.NODE_FAILURE_TIMEOUT; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.DIRECTORY_NOT_FOUND; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_ALREADY_EXISTS; @@ -167,7 +168,6 @@ public void testKeysDelete() throws Exception { testCreateFile(ozoneBucket, keyName2, data, 
true, false); testCreateFile(ozoneBucket, keyName3, data, true, false); testCreateFile(ozoneBucket, keyName4, data, true, false); - ozoneBucket.getKey("dir/file1").getName(); // Delete keyName1 use deleteKey api. ozoneBucket.deleteKey(keyName1); @@ -335,7 +335,7 @@ private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket, String value = "random data"; OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), 1, uploadID); - ozoneOutputStream.write(value.getBytes(), 0, value.length()); + ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); ozoneOutputStream.close(); @@ -350,9 +350,9 @@ private void createMultipartKeyAndReadKey(OzoneBucket ozoneBucket, OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName); - byte[] fileContent = new byte[value.getBytes().length]; + byte[] fileContent = new byte[value.getBytes(UTF_8).length]; ozoneInputStream.read(fileContent); - Assert.assertEquals(value, new String(fileContent)); + Assert.assertEquals(value, new String(fileContent, UTF_8)); } @@ -388,14 +388,14 @@ private void createKeyTest(boolean checkSuccess) throws Exception { OzoneOutputStream ozoneOutputStream = ozoneBucket.createKey(keyName, value.length(), ReplicationType.STAND_ALONE, ReplicationFactor.ONE, new HashMap<>()); - ozoneOutputStream.write(value.getBytes(), 0, value.length()); + ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); ozoneOutputStream.close(); OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName); - byte[] fileContent = new byte[value.getBytes().length]; + byte[] fileContent = new byte[value.getBytes(UTF_8).length]; ozoneInputStream.read(fileContent); - Assert.assertEquals(value, new String(fileContent)); + Assert.assertEquals(value, new String(fileContent, UTF_8)); } catch (ConnectException | RemoteException e) { if (!checkSuccess) { @@ -652,7 +652,7 @@ private String createMultipartUploadPartKey(OzoneBucket ozoneBucket, String value = "random data"; OzoneOutputStream ozoneOutputStream = ozoneBucket.createMultipartKey( keyName, value.length(), partNumber, uploadID); - ozoneOutputStream.write(value.getBytes(), 0, value.length()); + ozoneOutputStream.write(value.getBytes(UTF_8), 0, value.length()); ozoneOutputStream.close(); return ozoneOutputStream.getCommitUploadPartInfo().getPartName(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java index bdaca532233..6ea3fa2253e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java @@ -36,6 +36,7 @@ import org.apache.commons.lang3.RandomStringUtils; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS; @@ -191,7 +192,7 @@ public void testRestartOMWithKeyOperation() throws Exception { data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); - ozoneOutputStream.write(data.getBytes(), 0, data.length()); + ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); ozoneOutputStream.close(); cluster.restartOzoneManager(); diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java index 8f9235d281a..2d58ffbfda7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java @@ -73,11 +73,11 @@ public class TestScmSafeMode { private static final Logger LOG = LoggerFactory .getLogger(TestScmSafeMode.class); - private static MiniOzoneCluster cluster = null; - private static MiniOzoneCluster.Builder builder = null; - private static OzoneConfiguration conf; - private static OzoneManager om; - private static StorageContainerLocationProtocolClientSideTranslatorPB + private MiniOzoneCluster cluster = null; + private MiniOzoneCluster.Builder builder = null; + private OzoneConfiguration conf; + private OzoneManager om; + private StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/parser/TestOMRatisLogParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/parser/TestOMRatisLogParser.java index 71e438f108d..1aeb3e4bb2a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/parser/TestOMRatisLogParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/parser/TestOMRatisLogParser.java @@ -37,6 +37,8 @@ import org.junit.Rule; import org.junit.rules.Timeout; + +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; /** @@ -50,7 +52,7 @@ public class TestOMRatisLogParser { @Rule public Timeout timeout = new Timeout(300000); - private static MiniOzoneHAClusterImpl cluster = null; + private MiniOzoneHAClusterImpl cluster = null; private final ByteArrayOutputStream out = new ByteArrayOutputStream(); private final ByteArrayOutputStream err = new ByteArrayOutputStream(); @@ -70,8 +72,8 @@ public void setup() throws Exception { ObjectStore objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf) .getObjectStore(); performFewRequests(objectStore); - System.setOut(new PrintStream(out)); - System.setErr(new PrintStream(err)); + System.setOut(new PrintStream(out, false, UTF_8.name())); + System.setErr(new PrintStream(err, false, UTF_8.name())); } private void performFewRequests(ObjectStore objectStore) throws Exception { @@ -122,6 +124,7 @@ public void testRatisLogParsing() throws Exception { // Not checking total entry count, because of not sure of exact count of // metadata entry changes. 
- Assert.assertTrue(out.toString().contains("Num Total Entries:")); + Assert.assertTrue(out.toString(UTF_8.name()) + .contains("Num Total Entries:")); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java index 844c859ac02..3e2570c6ff7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -26,7 +26,6 @@ import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.VolumeArgs; @@ -107,7 +106,6 @@ public void testDownloadCheckpoint() throws Exception { OzoneVolume retVolumeinfo = objectStore.getVolume(volumeName); retVolumeinfo.createBucket(bucketName); - OzoneBucket ozoneBucket = retVolumeinfo.getBucket(bucketName); String leaderOMNodeId = OmFailoverProxyUtil .getFailoverProxyProvider(objectStore.getClientProxy()) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java index 2ff79de0750..64c759b8c7e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.recon; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OM_DB_CHECKPOINT_HTTP_ENDPOINT; import java.util.HashMap; @@ -56,8 +57,8 @@ public class TestReconWithOzoneManagerHA { private MiniOzoneHAClusterImpl cluster; private ObjectStore objectStore; - private final String omServiceId = "omService1"; - private final String volName = "testrecon"; + private static final String OM_SERVICE_ID = "omService1"; + private static final String VOL_NAME = "testrecon"; @Before public void setup() throws Exception { @@ -72,16 +73,16 @@ public void setup() throws Exception { cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setClusterId(UUID.randomUUID().toString()) .setScmId(UUID.randomUUID().toString()) - .setOMServiceId(omServiceId) + .setOMServiceId(OM_SERVICE_ID) .setNumDatanodes(1) .setNumOfOzoneManagers(3) .includeRecon(true) .build(); cluster.waitForClusterToBeReady(); - objectStore = OzoneClientFactory.getRpcClient(omServiceId, conf) + objectStore = OzoneClientFactory.getRpcClient(OM_SERVICE_ID, conf) .getObjectStore(); - objectStore.createVolume(volName); - objectStore.getVolume(volName).createBucket(volName); + objectStore.createVolume(VOL_NAME); + objectStore.getVolume(VOL_NAME).createBucket(VOL_NAME); } @After @@ -119,11 +120,11 @@ public void testReconGetsSnapshotFromLeader() throws Exception { expectedUrl, snapshotUrl); // Write some data String keyPrefix = "ratis"; - OzoneOutputStream key = objectStore.getVolume(volName) - .getBucket(volName) + OzoneOutputStream key = 
objectStore.getVolume(VOL_NAME) + .getBucket(VOL_NAME) .createKey(keyPrefix, 1024, ReplicationType.RATIS, ReplicationFactor.ONE, new HashMap<>()); - key.write(keyPrefix.getBytes()); + key.write(keyPrefix.getBytes(UTF_8)); key.flush(); key.close(); // Sync data to Recon @@ -140,7 +141,7 @@ public void testReconGetsSnapshotFromLeader() throws Exception { reconKeyPrefix = keyValue.getKey().getKeyPrefix(); } Assert.assertEquals("Container data should be synced to recon.", - String.format("/%s/%s/%s", volName, volName, keyPrefix), + String.format("/%s/%s/%s", VOL_NAME, VOL_NAME, keyPrefix), reconKeyPrefix); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java index 31695134301..08a993f615d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java @@ -44,6 +44,8 @@ import org.junit.rules.ExpectedException; import org.junit.rules.Timeout; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Test Container calls. */ @@ -98,7 +100,7 @@ public void testAllocateWrite() throws Exception { BlockID blockID = ContainerTestHelper.getTestBlockID( container.getContainerInfo().getContainerID()); ContainerProtocolCalls.writeSmallFile(client, blockID, - "data123".getBytes(), null); + "data123".getBytes(UTF_8), null); ContainerProtos.GetSmallFileResponseProto response = ContainerProtocolCalls.readSmallFile(client, blockID, null); String readData = response.getData().getData().toStringUtf8(); @@ -142,7 +144,7 @@ public void testInvalidContainerRead() throws Exception { BlockID blockID = ContainerTestHelper.getTestBlockID( container.getContainerInfo().getContainerID()); ContainerProtocolCalls.writeSmallFile(client, blockID, - "data123".getBytes(), null); + "data123".getBytes(UTF_8), null); thrown.expect(StorageContainerException.class); thrown.expectMessage("ContainerID 8888 does not exist"); @@ -170,7 +172,7 @@ public void testReadWriteWithBCSId() throws Exception { container.getContainerInfo().getContainerID()); ContainerProtos.PutSmallFileResponseProto responseProto = ContainerProtocolCalls - .writeSmallFile(client, blockID1, "data123".getBytes(), null); + .writeSmallFile(client, blockID1, "data123".getBytes(UTF_8), null); long bcsId = responseProto.getCommittedBlockLength().getBlockID() .getBlockCommitSequenceId(); try { @@ -188,7 +190,7 @@ public void testReadWriteWithBCSId() throws Exception { BlockID blockID2 = ContainerTestHelper .getTestBlockID(container.getContainerInfo().getContainerID()); ContainerProtocolCalls - .writeSmallFile(client, blockID2, "data123".getBytes(), null); + .writeSmallFile(client, blockID2, "data123".getBytes(UTF_8), null); try { blockID1.setBlockCommitSequenceId(bcsId + 1); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java index 5efb57e1040..00aa9941d91 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestGetCommittedBlockLengthAndPutKey.java @@ -49,6 +49,8 @@ import org.junit.Test; import 
org.junit.rules.Timeout; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Test Container calls. */ @@ -101,7 +103,7 @@ public void tesGetCommittedBlockLength() throws Exception { BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); byte[] data = - RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(); + RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8); ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper .getWriteChunkRequest(container.getPipeline(), blockID, @@ -159,7 +161,7 @@ public void tesPutKeyResposne() throws Exception { BlockID blockID = ContainerTestHelper.getTestBlockID(containerID); byte[] data = - RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(); + RandomStringUtils.random(RandomUtils.nextInt(0, 1024)).getBytes(UTF_8); ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper .getWriteChunkRequest(container.getPipeline(), blockID, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java index 4025acac439..efe02f042d5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java @@ -53,6 +53,7 @@ import java.util.UUID; import java.util.stream.Collectors; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic .NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE; @@ -67,8 +68,8 @@ public class TestSCMContainerPlacementPolicyMetrics { private MiniOzoneCluster cluster; private MetricsRecordBuilder metrics; - private static OzoneClient ozClient = null; - private static ObjectStore store = null; + private OzoneClient ozClient = null; + private ObjectStore store = null; @Before public void setup() throws Exception { @@ -110,9 +111,9 @@ public void test() throws IOException { // Write data into a key try (OzoneOutputStream out = bucket.createKey(keyName, - value.getBytes().length, ReplicationType.RATIS, + value.getBytes(UTF_8).length, ReplicationType.RATIS, THREE, new HashMap<>())) { - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); } // close container diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java index 394c102106e..057f5281e45 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java @@ -179,9 +179,9 @@ public void testSCMContainerStateCount() throws Exception { containerStateCount = scm.getContainerStateCount(); containerStateCount.forEach((k, v) -> { - if(k == HddsProtos.LifeCycleState.CLOSING.toString()) { + if(k.equals(HddsProtos.LifeCycleState.CLOSING.toString())) { assertEquals((int)v, 5); - } else if (k == HddsProtos.LifeCycleState.CLOSED.toString()) { + } else if (k.equals(HddsProtos.LifeCycleState.CLOSED.toString())) { assertEquals((int)v, 5); } else { // Remaining all container state count should be zero. 
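Reviewer note on the TestSCMMXBean hunk just above: it replaces == with equals() when matching lifecycle-state names. On strings, == compares object identity and is only accidentally true when both sides happen to be the very same object; equals() compares contents, which is what the test means. A minimal sketch of the pitfall, not part of the patch and assuming nothing beyond the JDK (the map is a stand-in for scm.getContainerStateCount()):

import java.util.HashMap;
import java.util.Map;

public final class StringEqualityDemo {
  private StringEqualityDemo() {
  }

  public static void main(String[] args) {
    Map<String, Integer> containerStateCount = new HashMap<>();
    containerStateCount.put("CLOSING", 5);

    containerStateCount.forEach((k, v) -> {
      // Builds "CLOSING" at runtime: equal contents, distinct object.
      String probe = new StringBuilder("CLOS").append("ING").toString();
      System.out.println(k == probe);      // false: compares references
      System.out.println(k.equals(probe)); // true: compares contents
    });
  }
}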
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java index fe157340812..555b5c94dc4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java @@ -54,9 +54,9 @@ public class TestXceiverClientManager { */ @Rule public Timeout timeout = new Timeout(300000); - private static OzoneConfiguration config; - private static MiniOzoneCluster cluster; - private static StorageContainerLocationProtocolClientSideTranslatorPB + private OzoneConfiguration config; + private MiniOzoneCluster cluster; + private StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; @Rule diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java index fe88de31042..5bf39d7358c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestPipelineManagerMXBean.java @@ -52,7 +52,7 @@ public class TestPipelineManagerMXBean { public Timeout timeout = new Timeout(300000); private MiniOzoneCluster cluster; - private static MBeanServer mbs; + private MBeanServer mbs; @Before public void init() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java index 1219d898a7f..4d6e6b01d4b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineBytesWrittenMetrics.java @@ -47,6 +47,8 @@ import org.junit.Rule; import org.junit.rules.Timeout; + +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE; @@ -97,9 +99,9 @@ private void writeNumBytes(int numBytes) throws Exception { String keyName = UUID.randomUUID().toString(); OzoneOutputStream out = bucket - .createKey(keyName, value.getBytes().length, ReplicationType.RATIS, + .createKey(keyName, value.getBytes(UTF_8).length, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>()); - out.write(value.getBytes()); + out.write(value.getBytes(UTF_8)); out.close(); OmKeyArgs.Builder builder = new OmKeyArgs.Builder(); @@ -108,7 +110,7 @@ private void writeNumBytes(int numBytes) throws Exception { OzoneKeyDetails keyDetails = bucket.getKey(keyName); Assert.assertEquals(keyName, keyDetails.getName()); - Assert.assertEquals(value.getBytes().length, keyDetails + Assert.assertEquals(value.getBytes(UTF_8).length, keyDetails .getOzoneKeyLocations().get(0).getLength()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 7671dc83616..ae71907bedb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -21,6 +21,7 @@ import java.io.File; import java.io.FileNotFoundException; import java.io.PrintStream; +import java.io.UnsupportedEncodingException; import java.util.Arrays; import java.util.List; import java.util.UUID; @@ -44,6 +45,8 @@ import org.apache.hadoop.util.ToolRunner; import com.google.common.base.Strings; + +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY; import static org.apache.hadoop.fs.FileSystem.FS_DEFAULT_NAME_KEY; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; @@ -149,9 +152,9 @@ public static void shutdown() { } @Before - public void setup() { - System.setOut(new PrintStream(out)); - System.setErr(new PrintStream(err)); + public void setup() throws UnsupportedEncodingException { + System.setOut(new PrintStream(out, false, UTF_8.name())); + System.setErr(new PrintStream(err, false, UTF_8.name())); } @After @@ -316,8 +319,8 @@ private void generateKeys(String volumeName, String bucketName) { /** * Helper function to get nums of keys from info of listing command. */ - private int getNumOfKeys() { - return out.toString().split("key").length - 1; + private int getNumOfKeys() throws UnsupportedEncodingException { + return out.toString(UTF_8.name()).split("key").length - 1; } /** @@ -339,8 +342,10 @@ private void generateBuckets(String volumeName, int numOfBuckets) { /** * Helper function to get nums of buckets from info of listing command. */ - private int getNumOfBuckets(String bucketPrefix) { - return out.toString().split(bucketPrefix).length - 1; + private int getNumOfBuckets(String bucketPrefix) + throws UnsupportedEncodingException { + return out.toString(UTF_8.name()) + .split(bucketPrefix).length - 1; } @@ -366,9 +371,9 @@ public void testOzoneShCmdURIs() { // TODO: Fix this behavior, then uncomment the execute() below. String setOmAddress = "--set=" + OMConfigKeys.OZONE_OM_ADDRESS_KEY + "=" + omLeaderNodeAddr; - String[] args = new String[] {setOmAddress, - "volume", "create", "o3://" + omLeaderNodeAddrWithoutPort + "/volume2"}; - //execute(ozoneShell, args); + String[] args = new String[] {setOmAddress, "volume", "create", + "o3://" + omLeaderNodeAddrWithoutPort + "/volume2"}; + //execute(ozoneShell, args); // Test case 3: ozone sh volume create o3://om1:port/volume3 // Expectation: Success. @@ -403,7 +408,7 @@ public void testOzoneShCmdURIs() { * Test ozone shell list command. */ @Test - public void testOzoneShCmdList() { + public void testOzoneShCmdList() throws UnsupportedEncodingException { // Part of listing keys test.
generateKeys("/volume4", "/bucket"); final String destinationBucket = "o3://" + omServiceId + "/volume4/bucket"; diff --git a/hadoop-ozone/interface-storage/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/interface-storage/dev-support/findbugsExcludeFile.xml index 9492f9c5d48..c08f04a59a2 100644 --- a/hadoop-ozone/interface-storage/dev-support/findbugsExcludeFile.xml +++ b/hadoop-ozone/interface-storage/dev-support/findbugsExcludeFile.xml @@ -18,4 +18,18 @@ + + + + + + + + + + + + + + diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java index 39c35f675e6..7e290de8c82 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java @@ -41,9 +41,10 @@ * This class tests OmKeyInfoCodec. */ public class TestOmKeyInfoCodec { - private final String volume = "hadoop"; - private final String bucket = "ozone"; - private final String keyName = "user/root/terasort/10G-input-6/part-m-00037"; + private static final String VOLUME = "hadoop"; + private static final String BUCKET = "ozone"; + private static final String KEYNAME = + "user/root/terasort/10G-input-6/part-m-00037"; private OmKeyInfo getKeyInfo(int chunkNum) { @@ -64,9 +65,9 @@ private OmKeyInfo getKeyInfo(int chunkNum) { .setModificationTime(Time.now()) .setReplicationType(HddsProtos.ReplicationType.RATIS) .setReplicationFactor(HddsProtos.ReplicationFactor.THREE) - .setVolumeName(volume) - .setBucketName(bucket) - .setKeyName(keyName) + .setVolumeName(VOLUME) + .setBucketName(BUCKET) + .setKeyName(KEYNAME) .setObjectID(Time.now()) .setUpdateID(Time.now()) .setDataSize(100) diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java index f3d88f1c9b9..2c937bf946e 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java @@ -27,6 +27,8 @@ import java.util.UUID; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * This class tests OmMultipartKeyInfoCodec. */ @@ -60,7 +62,7 @@ public void testOmMultipartKeyInfoCodec() { // When random byte data passed returns null. try { - codec.fromPersistedFormat("random".getBytes()); + codec.fromPersistedFormat("random".getBytes(UTF_8)); } catch (IllegalArgumentException ex) { GenericTestUtils.assertExceptionContains("Can't encode the the raw " + "data from the byte array", ex); diff --git a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java index 0eb87b8cfa2..95b3f4d0a97 100644 --- a/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java +++ b/hadoop-ozone/interface-storage/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java @@ -42,9 +42,10 @@ * This class tests RepeatedOmKeyInfoCodec. 
*/ public class TestRepeatedOmKeyInfoCodec { - private final String volume = "hadoop"; - private final String bucket = "ozone"; - private final String keyName = "user/root/terasort/10G-input-6/part-m-00037"; + private static final String VOLUME = "hadoop"; + private static final String BUCKET = "ozone"; + private static final String KEYNAME = + "user/root/terasort/10G-input-6/part-m-00037"; private OmKeyInfo getKeyInfo(int chunkNum) { @@ -65,9 +66,9 @@ private OmKeyInfo getKeyInfo(int chunkNum) { .setModificationTime(Time.now()) .setReplicationType(HddsProtos.ReplicationType.RATIS) .setReplicationFactor(HddsProtos.ReplicationFactor.THREE) - .setVolumeName(volume) - .setBucketName(bucket) - .setKeyName(keyName) + .setVolumeName(VOLUME) + .setBucketName(BUCKET) + .setKeyName(KEYNAME) .setObjectID(Time.now()) .setUpdateID(Time.now()) .setDataSize(100) diff --git a/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml new file mode 100644 index 00000000000..898cda696e6 --- /dev/null +++ b/hadoop-ozone/ozone-manager/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,80 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index dc9075360bc..7b51ff32b98 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -157,6 +157,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java index a947b3550ce..9612c3ccf0a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java @@ -143,14 +143,9 @@ public void listMultipartUploads() throws IOException { createBucket(metadataManager, "vol1", "bucket1"); createBucket(metadataManager, "vol1", "bucket2"); - OmMultipartInfo upload1 = - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1"); - - OmMultipartInfo upload2 = - initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key2"); - - OmMultipartInfo upload3 = - initMultipartUpload(keyManager, "vol1", "bucket2", "dir/key1"); + initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1"); + initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key2"); + initMultipartUpload(keyManager, "vol1", "bucket2", "dir/key1"); //WHEN OmMultipartUploadList omMultipartUploadList = @@ -263,8 +258,7 @@ public void listMultipartUploadsWithPrefix() throws IOException { createBucket(metadataManager, "vol1", "bucket1"); createBucket(metadataManager, "vol1", "bucket2"); - OmMultipartInfo upload1 = - initMultipartUpload(keyManager, "vol1", "bucket1", "dip/key1"); + initMultipartUpload(keyManager, "vol1", "bucket1", "dip/key1"); initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key1"); initMultipartUpload(keyManager, "vol1", "bucket1", "dir/key2"); @@ -332,8 +326,6 @@ private OmMultipartInfo addinitMultipartUploadToCache( private void abortMultipart( String volume, String bucket, String key, String uploadID) { - Map partKeyInfoMap = - new HashMap<>(); metadataManager.getMultipartInfoTable().addCacheEntry( new 
CacheKey<>(metadataManager.getMultipartKey(volume, bucket, key, uploadID)), new CacheValue<>(Optional.absent(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java index 80281693c1b..595713b0538 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerStarter.java @@ -25,8 +25,11 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintStream; +import java.io.UnsupportedEncodingException; import java.util.regex.Matcher; import java.util.regex.Pattern; + +import static java.nio.charset.StandardCharsets.UTF_8; import static org.junit.Assert.*; /** @@ -41,13 +44,14 @@ public class TestOzoneManagerStarter { private final ByteArrayOutputStream errContent = new ByteArrayOutputStream(); private final PrintStream originalOut = System.out; private final PrintStream originalErr = System.err; + private static final String DEFAULT_ENCODING = UTF_8.name(); private MockOMStarter mock; @Before - public void setUpStreams() { - System.setOut(new PrintStream(outContent)); - System.setErr(new PrintStream(errContent)); + public void setUpStreams() throws UnsupportedEncodingException { + System.setOut(new PrintStream(outContent, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(errContent, false, DEFAULT_ENCODING)); mock = new MockOMStarter(); } @@ -115,10 +119,11 @@ public void testInitThatReturnsFalseThrowsException() { } @Test - public void testUsagePrintedOnInvalidInput() { + public void testUsagePrintedOnInvalidInput() + throws UnsupportedEncodingException { executeCommand("--invalid"); Pattern p = Pattern.compile("^Unknown option:.*--invalid.*\nUsage"); - Matcher m = p.matcher(errContent.toString()); + Matcher m = p.matcher(errContent.toString(DEFAULT_ENCODING)); assertTrue(m.find()); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java index 310d42c0ca7..0ea6e068305 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java @@ -79,9 +79,12 @@ public void testAccessContorlExceptionFailovers() throws Exception { "om3", ex); Assert.assertTrue(ex.getCause() instanceof AccessControlException); - logCapturer.getOutput().contains(getRetryProxyDebugMsg("om1")); - logCapturer.getOutput().contains(getRetryProxyDebugMsg("om2")); - logCapturer.getOutput().contains(getRetryProxyDebugMsg("om3")); + Assert.assertTrue( + logCapturer.getOutput().contains(getRetryProxyDebugMsg("om1"))); + Assert.assertTrue( + logCapturer.getOutput().contains(getRetryProxyDebugMsg("om2"))); + Assert.assertTrue( + logCapturer.getOutput().contains(getRetryProxyDebugMsg("om3"))); } } @@ -90,7 +93,7 @@ private String getRetryProxyDebugMsg(String omNodeId) { "Permission denied."; } - private final class MockOzoneManagerProtocol + private static final class MockOzoneManagerProtocol implements OzoneManagerProtocolPB { private final String omNodeId; diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java 
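The TestOMFailovers hunk above fixes assertions that silently did nothing: the old code computed contains() and discarded the boolean, so the test could never fail on those lines. (The same hunk also makes the mock protocol a static nested class, since it never touches the enclosing instance.) A sketch of the no-op-assertion smell and its fix, with a hypothetical stand-in for the captured log output:

import org.junit.Assert;

public class NoOpAssertionSketch {
  public static void main(String[] args) {
    // Stand-in for logCapturer.getOutput(); not the real log format.
    String logOutput = "RetryProxy: OM om1: ... Permission denied.";
    logOutput.contains("om1");                     // no-op: result ignored, asserts nothing
    Assert.assertTrue(logOutput.contains("om1"));  // real assertion: fails loudly on regression
  }
}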
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java index c875c9ccd8d..5be71095448 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisServer.java @@ -52,6 +52,7 @@ import org.mockito.Mockito; import org.slf4j.LoggerFactory; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.junit.Assert.assertFalse; import static org.mockito.Mockito.when; @@ -186,7 +187,7 @@ public void testIsReadOnlyCapturesAllCmdTypeEnums() throws Exception { public void verifyRaftGroupIdGenerationWithDefaultOmServiceId() throws Exception { UUID uuid = UUID.nameUUIDFromBytes(OzoneConsts.OM_SERVICE_ID_DEFAULT - .getBytes()); + .getBytes(UTF_8)); RaftGroupId raftGroupId = omRatisServer.getRaftGroup().getGroupId(); Assert.assertEquals(uuid, raftGroupId.getUuid()); Assert.assertEquals(raftGroupId.toByteString().size(), 16); @@ -219,7 +220,7 @@ public void verifyRaftGroupIdGenerationWithCustomOmServiceId() throws Collections.emptyList()); newOmRatisServer.start(); - UUID uuid = UUID.nameUUIDFromBytes(customOmServiceId.getBytes()); + UUID uuid = UUID.nameUUIDFromBytes(customOmServiceId.getBytes(UTF_8)); RaftGroupId raftGroupId = newOmRatisServer.getRaftGroup().getGroupId(); Assert.assertEquals(uuid, raftGroupId.getUuid()); Assert.assertEquals(raftGroupId.toByteString().size(), 16); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java index 09c6ebca9fe..dcd1d83df84 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java @@ -142,9 +142,8 @@ public void testValidateAndUpdateCacheWithQuota() throws Exception { new OMBucketSetPropertyRequest(omRequest); int countException = 0; try { - OMClientResponse omClientResponse = - omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); + omBucketSetPropertyRequest.validateAndUpdateCache(ozoneManager, 1, + ozoneManagerDoubleBufferHelper); } catch (IllegalArgumentException ex) { countException++; GenericTestUtils.assertExceptionContains( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java index 887fbee231d..bff1943550b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMDirectoryCreateRequest.java @@ -128,10 +128,7 @@ public void testPreExecute() throws Exception { public void testValidateAndUpdateCache() throws Exception { String volumeName = "vol1"; String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } + String keyName = genRandomKeyName(); // Add volume 
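The getBytes(UTF_8) changes in TestOzoneManagerRatisServer above matter for correctness, not just style: getBytes() with no argument uses the platform default charset, so a name-based UUID derived from it could differ between JVMs with different default encodings. Pinning UTF-8 makes the derived RaftGroupId deterministic. A sketch with a hypothetical service id:

import java.util.UUID;
import static java.nio.charset.StandardCharsets.UTF_8;

public class NameUuidSketch {
  public static void main(String[] args) {
    String serviceId = "om-service-test1";   // hypothetical OM service id
    // Same bytes in, same UUID out, on every platform and locale.
    UUID raftGroupUuid = UUID.nameUUIDFromBytes(serviceId.getBytes(UTF_8));
    System.out.println(raftGroupUuid);
  }
}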
and bucket entries to DB. TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, @@ -163,10 +160,7 @@ public void testValidateAndUpdateCache() throws Exception { public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { String volumeName = "vol1"; String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } + String keyName = genRandomKeyName(); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); @@ -195,10 +189,7 @@ public void testValidateAndUpdateCacheWithVolumeNotFound() throws Exception { public void testValidateAndUpdateCacheWithBucketNotFound() throws Exception { String volumeName = "vol1"; String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } + String keyName = genRandomKeyName(); OMRequest omRequest = createDirectoryRequest(volumeName, bucketName, keyName); @@ -229,10 +220,7 @@ public void testValidateAndUpdateCacheWithSubDirectoryInPath() throws Exception { String volumeName = "vol1"; String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } + String keyName = genRandomKeyName(); // Add volume and bucket entries to DB. TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, @@ -273,10 +261,7 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists() throws Exception { String volumeName = "vol1"; String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } + String keyName = genRandomKeyName(); // Add volume and bucket entries to DB. TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, @@ -319,10 +304,7 @@ public void testValidateAndUpdateCacheWithDirectoryAlreadyExists() public void testValidateAndUpdateCacheWithFilesInPath() throws Exception { String volumeName = "vol1"; String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } + String keyName = genRandomKeyName(); // Add volume and bucket entries to DB. TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, @@ -360,10 +342,7 @@ public void testCreateDirectoryOMMetric() throws Exception { String volumeName = "vol1"; String bucketName = "bucket1"; - String keyName = RandomStringUtils.randomAlphabetic(5); - for (int i =0; i< 3; i++) { - keyName += "/" + RandomStringUtils.randomAlphabetic(5); - } + String keyName = genRandomKeyName(); // Add volume and bucket entries to DB. 
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName, @@ -411,4 +390,12 @@ private OMRequest createDirectoryRequest(String volumeName, String bucketName, .setClientId(UUID.randomUUID().toString()).build(); } + private String genRandomKeyName() { + StringBuilder keyNameBuilder = new StringBuilder(); + keyNameBuilder.append(RandomStringUtils.randomAlphabetic(5)); + for (int i =0; i< 3; i++) { + keyNameBuilder.append("/").append(RandomStringUtils.randomAlphabetic(5)); + } + return keyNameBuilder.toString(); + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java index c7aa6be9aa3..a500f4c2ab1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java @@ -77,10 +77,10 @@ public void testPreExecute() throws Exception{ // KeyLocation should be set. Assert.assertTrue(keyLocations.size() == 1); - Assert.assertEquals(containerID, + Assert.assertEquals(CONTAINER_ID, keyLocations.get(0).getBlockID().getContainerBlockID() .getContainerID()); - Assert.assertEquals(localID, + Assert.assertEquals(LOCAL_ID, keyLocations.get(0).getBlockID().getContainerBlockID() .getLocalID()); Assert.assertTrue(keyLocations.get(0).hasPipeline()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java index f309da041a7..4b3d38f9b81 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMAllocateBlockRequest.java @@ -213,10 +213,10 @@ private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception { // KeyLocation should be set. Assert.assertTrue(allocateBlockRequest.hasKeyLocation()); - Assert.assertEquals(containerID, + Assert.assertEquals(CONTAINER_ID, allocateBlockRequest.getKeyLocation().getBlockID() .getContainerBlockID().getContainerID()); - Assert.assertEquals(localID, + Assert.assertEquals(LOCAL_ID, allocateBlockRequest.getKeyLocation().getBlockID() .getContainerBlockID().getLocalID()); Assert.assertTrue(allocateBlockRequest.getKeyLocation().hasPipeline()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java index 2b8ffce4995..72699573130 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java @@ -293,10 +293,10 @@ private OMRequest doPreExecute(OMRequest originalOMRequest) throws Exception { keyArgs.getKeyLocationsList(); // KeyLocation should be set. 
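The genRandomKeyName() helper extracted above replaces String += in a loop, which allocates a fresh String per iteration and is what SpotBugs flags as SBSC_USE_STRINGBUFFER_CONCATENATION. An equivalent self-contained sketch; randomAlphabetic here is our stand-in for the commons-lang3 RandomStringUtils call, and StringJoiner is an alternative to the patch's StringBuilder that handles the "/" separators for free:

import java.util.StringJoiner;
import java.util.concurrent.ThreadLocalRandom;

public class KeyNameSketch {
  // Stand-in for RandomStringUtils.randomAlphabetic(count).
  private static String randomAlphabetic(int count) {
    StringBuilder sb = new StringBuilder(count);
    for (int i = 0; i < count; i++) {
      sb.append((char) ('a' + ThreadLocalRandom.current().nextInt(26)));
    }
    return sb.toString();
  }

  static String genRandomKeyName() {
    StringJoiner joiner = new StringJoiner("/");
    for (int i = 0; i < 4; i++) {   // one root segment plus three subdirectories
      joiner.add(randomAlphabetic(5));
    }
    return joiner.toString();
  }

  public static void main(String[] args) {
    System.out.println(genRandomKeyName());  // e.g. abcde/fghij/klmno/pqrst
  }
}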
Assert.assertTrue(keyLocations.size() == 1); - Assert.assertEquals(containerID, + Assert.assertEquals(CONTAINER_ID, keyLocations.get(0).getBlockID().getContainerBlockID() .getContainerID()); - Assert.assertEquals(localID, + Assert.assertEquals(LOCAL_ID, keyLocations.get(0).getBlockID().getContainerBlockID() .getLocalID()); Assert.assertTrue(keyLocations.get(0).hasPipeline()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index 116ba5ce8fb..7bf43a7226f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -20,6 +20,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Random; import java.util.UUID; import org.apache.commons.lang3.tuple.Pair; @@ -78,8 +79,8 @@ public class TestOMKeyRequest { protected OzoneBlockTokenSecretManager ozoneBlockTokenSecretManager; protected ScmBlockLocationProtocol scmBlockLocationProtocol; - protected final long containerID = 1000L; - protected final long localID = 100L; + protected static final long CONTAINER_ID = 1000L; + protected static final long LOCAL_ID = 100L; protected String volumeName; protected String bucketName; @@ -89,6 +90,7 @@ public class TestOMKeyRequest { protected long clientID; protected long scmBlockSize = 1000L; protected long dataSize; + protected Random random; // Just setting ozoneManagerDoubleBuffer which does nothing. protected OzoneManagerDoubleBufferHelper ozoneManagerDoubleBufferHelper = @@ -139,7 +141,7 @@ public void setup() throws Exception { AllocatedBlock allocatedBlock = new AllocatedBlock.Builder() - .setContainerBlockID(new ContainerBlockID(containerID, localID)) + .setContainerBlockID(new ContainerBlockID(CONTAINER_ID, LOCAL_ID)) .setPipeline(pipeline).build(); List allocatedBlocks = new ArrayList<>(); @@ -159,6 +161,7 @@ public void setup() throws Exception { replicationType = HddsProtos.ReplicationType.RATIS; clientID = Time.now(); dataSize = 1000L; + random = new Random(); Pair volumeAndBucket = Pair.of(volumeName, bucketName); when(ozoneManager.resolveBucketLink(any(KeyArgs.class), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java index 7eba6cafac7..36a1ec920c1 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java @@ -22,7 +22,6 @@ import java.util.Arrays; import java.util.List; import java.util.UUID; -import java.util.Random; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -265,7 +264,7 @@ private OpenKeyBucket makeOpenKeys(String volume, String bucket, for (int i = 0; i < numKeys; i++) { String keyName = UUID.randomUUID().toString(); - long clientID = new Random().nextLong(); + long clientID = random.nextLong(); OpenKey openKey = OpenKey.newBuilder() .setName(keyName) @@ -299,7 +298,7 @@ private OpenKeyBucket makeOpenKeys(String volume, String bucket, .setBucketName(bucket); for (int i = 0; i < numKeys; i++) { - long 
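The `protected Random random` field added to TestOMKeyRequest above, and the `new Random()` deletions that follow, address SpotBugs' DMI_RANDOM_USED_ONLY_ONCE: constructing a Random inside a loop body seeds a generator, draws one value, and throws it away. The fix in miniature:

import java.util.Random;

public class RandomReuseSketch {
  private final Random random = new Random();   // created once, reused by every caller

  long nextClientId() {
    return random.nextLong();                   // was: new Random().nextLong() per call
  }

  public static void main(String[] args) {
    RandomReuseSketch sketch = new RandomReuseSketch();
    for (int i = 0; i < 3; i++) {
      System.out.println(sketch.nextClientId());
    }
  }
}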
clientID = new Random().nextLong(); + long clientID = random.nextLong(); OpenKey openKey = OpenKey.newBuilder() .setName(key) diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/TestOMGetDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/TestOMGetDelegationTokenRequest.java index ca0ebc8f06e..d7ca680d600 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/TestOMGetDelegationTokenRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/security/TestOMGetDelegationTokenRequest.java @@ -56,7 +56,7 @@ public class TestOMGetDelegationTokenRequest extends private OMRequest originalRequest; private OMRequest modifiedRequest; private OMGetDelegationTokenRequest omGetDelegationTokenRequest; - private final String checkResponse = ""; + private static final String CHECK_RESPONSE = ""; @Before public void setupGetDelegationToken() { @@ -138,7 +138,7 @@ public void testPreExecuteWithNonNullToken() throws Exception { /* In preExecute(), if the token is nonNull we set GetDelegationTokenResponse with response. */ - Assert.assertNotEquals(checkResponse, + Assert.assertNotEquals(CHECK_RESPONSE, modifiedRequest.getUpdateGetDelegationTokenRequest() .getGetDelegationTokenResponse() .toString()); @@ -157,7 +157,7 @@ public void testPreExecuteWithNullToken() throws Exception { /* In preExecute(), if the token is null we do not set GetDelegationTokenResponse with response. */ - Assert.assertEquals(checkResponse, + Assert.assertEquals(CHECK_RESPONSE, modifiedRequest.getUpdateGetDelegationTokenRequest() .getGetDelegationTokenResponse() .toString()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java index 27116f25544..b8f36db0919 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java @@ -181,9 +181,8 @@ public void testValidateAndUpdateCacheWithQuota() throws Exception { int countException = 0; try { - OMClientResponse omClientResponse = - omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1, - ozoneManagerDoubleBufferHelper); + omVolumeSetQuotaRequest.validateAndUpdateCache(ozoneManager, 1, + ozoneManagerDoubleBufferHelper); } catch (IllegalArgumentException ex) { countException++; GenericTestUtils.assertExceptionContains( diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java index f32474049a1..017d59f0e3f 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeAddAclRequest.java @@ -85,7 +85,6 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { omMetadataManager.getVolumeTable().get(volumeKey); // As request is valid volume table should have entry. 
Assert.assertNotNull(omVolumeArgs); - OmOzoneAclMap aclMapBeforeSet = omVolumeArgs.getAclMap(); OMClientResponse omClientResponse = omVolumeAddAclRequest.validateAndUpdateCache(ozoneManager, 1, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java index d374e472abf..3c49e6dd38c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/acl/TestOMVolumeSetAclRequest.java @@ -94,7 +94,6 @@ public void testValidateAndUpdateCacheSuccess() throws Exception { omMetadataManager.getVolumeTable().get(volumeKey); // As request is valid volume table should have entry. Assert.assertNotNull(omVolumeArgs); - OmOzoneAclMap aclMapBeforeSet = omVolumeArgs.getAclMap(); OMClientResponse omClientResponse = omVolumeSetAclRequest.validateAndUpdateCache(ozoneManager, 1, diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java index 5d2a3d8d741..2d63ebdcf81 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCommitResponse.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.response.key; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; @@ -79,9 +78,6 @@ public void testAddToDBBatchNoOp() throws Exception { OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); - OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(keyName).setAdminName(keyName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setCreationTime(Time.now()).build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java index e3645ec8db7..4bef2efad5b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyCreateResponse.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.om.response.key; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; @@ -44,9 +43,6 @@ public void testAddToDBBatch() throws Exception { OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); - OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(keyName).setAdminName(keyName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); OmBucketInfo omBucketInfo = 
OmBucketInfo.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setCreationTime(Time.now()).build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java index 871e39f03d5..3c228325ad7 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyDeleteResponse.java @@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.util.Time; import org.junit.Assert; import org.junit.Test; @@ -45,9 +44,6 @@ public void testAddToDBBatch() throws Exception { OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); - OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(keyName).setAdminName(keyName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setCreationTime(Time.now()).build(); @@ -87,9 +83,6 @@ public void testAddToDBBatchWithNonEmptyBlocks() throws Exception { OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volumeName, bucketName, keyName, replicationType, replicationFactor); - OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(keyName).setAdminName(keyName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setCreationTime(Time.now()).build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java index 1ad4c70d6c8..312fcaf3aa3 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeyResponse.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om.response.key; +import java.util.Random; import java.util.UUID; import org.junit.After; @@ -50,6 +51,7 @@ public class TestOMKeyResponse { protected HddsProtos.ReplicationFactor replicationFactor; protected HddsProtos.ReplicationType replicationType; protected long clientID; + protected Random random; @Before public void setup() throws Exception { @@ -65,6 +67,7 @@ public void setup() throws Exception { replicationFactor = HddsProtos.ReplicationFactor.ONE; replicationType = HddsProtos.ReplicationType.RATIS; clientID = 1000L; + random = new Random(); } @After diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java index 8951a05040f..c1ee665fc35 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java +++ 
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysDeleteResponse.java @@ -20,7 +20,6 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -77,9 +76,6 @@ public void testKeysDeleteResponse() throws Exception { .setDeleteKeysResponse(DeleteKeysResponse.newBuilder() .setStatus(true)).build(); - OmVolumeArgs omVolumeArgs = OmVolumeArgs.newBuilder() - .setOwnerName(keyName).setAdminName(keyName) - .setVolume(volumeName).setCreationTime(Time.now()).build(); OmBucketInfo omBucketInfo = OmBucketInfo.newBuilder() .setVolumeName(volumeName).setBucketName(bucketName) .setCreationTime(Time.now()).build(); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java index f28bb4ca31a..90d3204c326 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java @@ -29,7 +29,6 @@ import java.util.HashMap; import java.util.Map; -import java.util.Random; import java.util.UUID; /** @@ -158,7 +157,7 @@ private Map addOpenKeysToDB(String volume, int numKeys, for (int i = 0; i < numKeys; i++) { String bucket = UUID.randomUUID().toString(); String key = UUID.randomUUID().toString(); - long clientID = new Random().nextLong(); + long clientID = random.nextLong(); OmKeyInfo omKeyInfo = TestOMRequestUtils.createOmKeyInfo(volume, bucket, key, replicationType, replicationFactor); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java index 219ec97a6b9..e58683a150a 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestOzoneNativeAuthorizer.java @@ -104,10 +104,10 @@ public class TestOzoneNativeAuthorizer { private static UserGroupInformation adminUgi; private static UserGroupInformation testUgi; - private static OzoneObj volObj; - private static OzoneObj buckObj; - private static OzoneObj keyObj; - private static OzoneObj prefixObj; + private OzoneObj volObj; + private OzoneObj buckObj; + private OzoneObj keyObj; + private OzoneObj prefixObj; @Parameterized.Parameters public static Collection data() { diff --git a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java index 5e14abd8179..fec292ab8da 100644 --- a/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java @@ -115,14 +115,20 @@ public void bufferPositionUnchangedOnEOF() throws IOException { } @Test - public void testStreamCapability() { + public void 
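The TestOzoneNativeAuthorizer hunk above turns static OzoneObj fields into instance fields. JUnit builds a fresh test instance for every @Test (and for every parameter set under the Parameterized runner), so state written in setup belongs in instance fields; writing statics from an instance context shares mutable state across tests and trips SpotBugs' ST_WRITE_TO_STATIC_FROM_INSTANCE_METHOD. A minimal sketch with an illustrative field name, not one from the patch:

import org.junit.Before;
import org.junit.Test;
import static org.junit.Assert.assertTrue;

public class PerInstanceStateSketch {
  private String volName;                 // was: private static String volName;

  @Before
  public void setup() {
    volName = "vol-" + hashCode();        // re-initialized for each test instance
  }

  @Test
  public void fieldIsInitialized() {
    assertTrue(volName.startsWith("vol-"));
  }
}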
testStreamCapability() throws IOException { final OzoneFSInputStream subject = createTestSubject(emptyStream()); - final CapableOzoneFSInputStream capableOzoneFSInputStream = - new CapableOzoneFSInputStream(subject, - new FileSystem.Statistics("test")); - - assertTrue(capableOzoneFSInputStream. - hasCapability(OzoneStreamCapabilities.READBYTEBUFFER)); + CapableOzoneFSInputStream capableOzoneFSInputStream = null; + try { + capableOzoneFSInputStream = new CapableOzoneFSInputStream(subject, + new FileSystem.Statistics("test")); + + assertTrue(capableOzoneFSInputStream. + hasCapability(OzoneStreamCapabilities.READBYTEBUFFER)); + } finally { + if (capableOzoneFSInputStream != null) { + capableOzoneFSInputStream.close(); + } + } } private static OzoneFSInputStream createTestSubject(InputStream input) { diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index e1444b49975..55707d40141 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -113,6 +113,13 @@ + + com.github.spotbugs + spotbugs-maven-plugin + + org.apache.hadoop.fs.ozone.* + + diff --git a/hadoop-ozone/ozonefs-hadoop3/pom.xml b/hadoop-ozone/ozonefs-hadoop3/pom.xml index 1ee74ddf83b..f52a10445e3 100644 --- a/hadoop-ozone/ozonefs-hadoop3/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3/pom.xml @@ -96,6 +96,13 @@ + + com.github.spotbugs + spotbugs-maven-plugin + + org.apache.hadoop.fs.ozone.* + + diff --git a/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml index 7c0ba4dd4e9..56564961688 100644 --- a/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml +++ b/hadoop-ozone/recon/dev-support/findbugsExcludeFile.xml @@ -25,4 +25,30 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java index 91c8b64a1d8..95facc314b2 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.recon.ReconUtils.createTarFile; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; @@ -30,9 +31,9 @@ import java.io.File; import java.io.FileInputStream; import java.io.FileOutputStream; -import java.io.FileWriter; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStreamWriter; import java.net.HttpURLConnection; import java.nio.charset.Charset; import java.nio.file.Paths; @@ -84,12 +85,14 @@ public void testCreateTarFile() throws Exception { tempSnapshotDir.mkdirs(); File file = new File(testDirName + "/temp1.txt"); - FileWriter writer = new FileWriter(file); + OutputStreamWriter writer = new OutputStreamWriter( + new FileOutputStream(file), UTF_8); writer.write("Test data 1"); writer.close(); file = new File(testDirName + "/temp2.txt"); - writer = new FileWriter(file); + writer = new OutputStreamWriter( + new FileOutputStream(file), UTF_8); writer.write("Test data 2"); writer.close(); @@ -112,15 +115,16 @@ public void testUntarCheckpointFile() throws Exception { File file1 = Paths.get(newDir.getAbsolutePath(), "file1") .toFile(); String str = "File1 Contents"; - BufferedWriter writer = new BufferedWriter(new FileWriter( - 
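The TestOzoneFSInputStream hunk above plugs a resource leak with a null-checked try/finally. Where the resource is AutoCloseable, try-with-resources gives the same close-on-all-paths guarantee with no null bookkeeping; a self-contained sketch of the equivalent idiom:

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class CloseOnAllPathsSketch {
  public static void main(String[] args) throws IOException {
    try (InputStream in = new ByteArrayInputStream(new byte[] {42})) {
      System.out.println(in.read());   // 42; close() runs even if read() throws
    }
  }
}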
file1.getAbsolutePath())); + BufferedWriter writer = new BufferedWriter(new OutputStreamWriter( + new FileOutputStream(file1.getAbsoluteFile()), UTF_8)); writer.write(str); writer.close(); File file2 = Paths.get(newDir.getAbsolutePath(), "file2") .toFile(); str = "File2 Contents"; - writer = new BufferedWriter(new FileWriter(file2.getAbsolutePath())); + writer = new BufferedWriter(new OutputStreamWriter( + new FileOutputStream(file2.getAbsoluteFile()), UTF_8)); writer.write(str); writer.close(); @@ -138,8 +142,8 @@ public void testMakeHttpCall() throws Exception { String url = "http://localhost:9874/dbCheckpoint"; File file1 = Paths.get(folder.getRoot().getPath(), "file1") .toFile(); - BufferedWriter writer = new BufferedWriter(new FileWriter( - file1.getAbsolutePath())); + BufferedWriter writer = new BufferedWriter(new OutputStreamWriter( + new FileOutputStream(file1.getAbsoluteFile()), UTF_8)); writer.write("File 1 Contents"); writer.close(); InputStream fileInputStream = new FileInputStream(file1); @@ -166,15 +170,16 @@ public void testGetLastKnownDB() throws IOException { File file1 = Paths.get(newDir.getAbsolutePath(), "valid_1") .toFile(); String str = "File1 Contents"; - BufferedWriter writer = new BufferedWriter(new FileWriter( - file1.getAbsolutePath())); + BufferedWriter writer = new BufferedWriter(new OutputStreamWriter( + new FileOutputStream(file1.getAbsoluteFile()), UTF_8)); writer.write(str); writer.close(); File file2 = Paths.get(newDir.getAbsolutePath(), "valid_2") .toFile(); str = "File2 Contents"; - writer = new BufferedWriter(new FileWriter(file2.getAbsolutePath())); + writer = new BufferedWriter(new OutputStreamWriter( + new FileOutputStream(file2.getAbsoluteFile()), UTF_8)); writer.write(str); writer.close(); @@ -182,7 +187,8 @@ public void testGetLastKnownDB() throws IOException { File file3 = Paths.get(newDir.getAbsolutePath(), "invalid_3") .toFile(); str = "File3 Contents"; - writer = new BufferedWriter(new FileWriter(file3.getAbsolutePath())); + writer = new BufferedWriter(new OutputStreamWriter( + new FileOutputStream(file3.getAbsoluteFile()), UTF_8)); writer.write(str); writer.close(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index 5ef99c72389..f5cdbdf1745 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -75,9 +75,7 @@ import org.jooq.DSLContext; import org.junit.Assert; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.TemporaryFolder; import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline; @@ -133,27 +131,24 @@ public class TestEndpoints extends AbstractReconSqlDBTest { private Pipeline pipeline; private FileCountBySizeDao fileCountBySizeDao; private DSLContext dslContext; - private final String host1 = "host1.datanode"; - private final String host2 = "host2.datanode"; - private final String ip1 = "1.1.1.1"; - private final String ip2 = "2.2.2.2"; - private final String prometheusTestResponseFile = + private static final String HOST1 = "host1.datanode"; + private static final String HOST2 = "host2.datanode"; + private static final String IP1 = "1.1.1.1"; + private static final String IP2 = "2.2.2.2"; + 
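The FileWriter replacements in TestReconUtils above follow from the same DM_DEFAULT_ENCODING rule: FileWriter always encodes with the platform default charset (charset-taking overloads only arrived in Java 11), so the patch wraps a FileOutputStream in an OutputStreamWriter pinned to UTF-8. java.nio.file offers the same explicit-charset write more compactly; a sketch against a throwaway temp file:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import static java.nio.charset.StandardCharsets.UTF_8;

public class Utf8FileWriteSketch {
  public static void main(String[] args) throws IOException {
    Path file = Files.createTempFile("temp1", ".txt");     // stand-in for the test file
    Files.write(file, "Test data 1".getBytes(UTF_8));      // explicit charset in
    System.out.println(new String(Files.readAllBytes(file), UTF_8));  // explicit charset out
  }
}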
private static final String PROMETHEUS_TEST_RESPONSE_FILE = "prometheus-test-response.txt"; private ReconUtils reconUtilsMock; - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - private void initializeInjector() throws Exception { reconOMMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(temporaryFolder.newFolder()), temporaryFolder.newFolder()); datanodeDetails = randomDatanodeDetails(); datanodeDetails2 = randomDatanodeDetails(); - datanodeDetails.setHostName(host1); - datanodeDetails.setIpAddress(ip1); - datanodeDetails2.setHostName(host2); - datanodeDetails2.setIpAddress(ip2); + datanodeDetails.setHostName(HOST1); + datanodeDetails.setIpAddress(IP1); + datanodeDetails2.setHostName(HOST2); + datanodeDetails2.setIpAddress(IP2); pipeline = getRandomPipeline(datanodeDetails); pipelineId = pipeline.getId().getId().toString(); @@ -179,7 +174,7 @@ private void initializeInjector() throws Exception { InputStream inputStream = Thread.currentThread().getContextClassLoader().getResourceAsStream( - prometheusTestResponseFile); + PROMETHEUS_TEST_RESPONSE_FILE); reconUtilsMock = mock(ReconUtils.class); HttpURLConnection urlConnectionMock = mock(HttpURLConnection.class); when(urlConnectionMock.getResponseCode()) @@ -268,9 +263,9 @@ public void setUp() throws Exception { .addPipelineReport(pipelineReport).build(); DatanodeDetailsProto datanodeDetailsProto = DatanodeDetailsProto.newBuilder() - .setHostName(host1) + .setHostName(HOST1) .setUuid(datanodeId) - .setIpAddress(ip1) + .setIpAddress(IP1) .build(); extendedDatanodeDetailsProto = HddsProtos.ExtendedDatanodeDetailsProto.newBuilder() @@ -299,9 +294,9 @@ public void setUp() throws Exception { DatanodeDetailsProto datanodeDetailsProto2 = DatanodeDetailsProto.newBuilder() - .setHostName(host2) + .setHostName(HOST2) .setUuid(datanodeId2) - .setIpAddress(ip2) + .setIpAddress(IP2) .build(); ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto2 = ExtendedDatanodeDetailsProto.newBuilder() @@ -383,7 +378,7 @@ private void testDatanodeResponse(DatanodeMetadata datanodeMetadata) throws IOException { String hostname = datanodeMetadata.getHostname(); switch (hostname) { - case host1: + case HOST1: Assert.assertEquals(75000, datanodeMetadata.getDatanodeStorageReport().getCapacity()); Assert.assertEquals(15400, @@ -402,7 +397,7 @@ private void testDatanodeResponse(DatanodeMetadata datanodeMetadata) datanodeMetadata.getPipelines().get(0).getLeaderNode()); Assert.assertEquals(1, datanodeMetadata.getLeaderCount()); break; - case host2: + case HOST2: Assert.assertEquals(130000, datanodeMetadata.getDatanodeStorageReport().getCapacity()); Assert.assertEquals(17800, @@ -494,7 +489,7 @@ public void testGetMetricsResponse() throws Exception { // when the prometheus endpoint is queried. 
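One detail worth noting about the HOST1/HOST2 constants above: they keep working as switch labels because a case label must be a constant variable, i.e. a final String initialized from a constant expression. Both the old final instance fields and the new static finals qualify; the rename simply brings them in line with the static-final naming convention. A sketch, with the capacities taken from the test's own assertions:

public class SwitchConstantSketch {
  private static final String HOST1 = "host1.datanode";
  private static final String HOST2 = "host2.datanode";

  static long capacityFor(String hostname) {
    switch (hostname) {
      case HOST1: return 75000;    // values asserted in testDatanodeResponse
      case HOST2: return 130000;
      default: return 0;
    }
  }

  public static void main(String[] args) {
    System.out.println(capacityFor("host1.datanode"));  // 75000
  }
}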
ClassLoader classLoader = Thread.currentThread().getContextClassLoader(); InputStream inputStream = classLoader - .getResourceAsStream(prometheusTestResponseFile); + .getResourceAsStream(PROMETHEUS_TEST_RESPONSE_FILE); HttpURLConnection urlConnectionMock = mock(HttpURLConnection.class); when(urlConnectionMock.getResponseCode()) .thenReturn(HttpServletResponse.SC_OK); @@ -506,7 +501,8 @@ public void testGetMetricsResponse() throws Exception { uriInfoMock, responseMock); byte[] fileBytes = FileUtils.readFileToByteArray( - new File(classLoader.getResource(prometheusTestResponseFile).getFile()) + new File(classLoader.getResource(PROMETHEUS_TEST_RESPONSE_FILE) + .getFile()) ); verify(outputStreamMock).write(fileBytes, 0, fileBytes.length); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 469ddf9ddff..3a9e4d1206e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -240,7 +240,7 @@ private List getMockContainers(int num) { * of a datanode via setMisRepWhenDnPresent. If a DN with that UUID is passed * to validateContainerPlacement, then it will return an invalid placement. */ - private class MockPlacementPolicy implements PlacementPolicy { + private static class MockPlacementPolicy implements PlacementPolicy { private UUID misRepWhenDnPresent = null; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java index 62baf1298ff..49322532aed 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java @@ -307,25 +307,25 @@ private UnhealthyContainers findRecordForState( private UnhealthyContainersRecord missingRecord() { return new UnhealthyContainersRecord(container.containerID().getId(), - UnHealthyContainerStates.MISSING.toString(), new Long(10), + UnHealthyContainerStates.MISSING.toString(), 10L, 3, 0, 3, null); } private UnhealthyContainersRecord underReplicatedRecord() { return new UnhealthyContainersRecord(container.containerID().getId(), UnHealthyContainerStates.UNDER_REPLICATED.toString(), - new Long(10), 3, 1, 2, null); + 10L, 3, 1, 2, null); } private UnhealthyContainersRecord overReplicatedRecord() { return new UnhealthyContainersRecord(container.containerID().getId(), - UnHealthyContainerStates.OVER_REPLICATED.toString(), new Long(10), + UnHealthyContainerStates.OVER_REPLICATED.toString(), 10L, 3, 5, -2, null); } private UnhealthyContainersRecord misReplicatedRecord() { return new UnhealthyContainersRecord(container.containerID().getId(), - UnHealthyContainerStates.MIS_REPLICATED.toString(), new Long(10), + UnHealthyContainerStates.MIS_REPLICATED.toString(), 10L, 3, 1, 2, "should be on 1 more rack"); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java index 727cfe3a986..a4b0b54da8e 100644 --- 
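The `new Long(10)` -> `10L` changes just below (TestContainerHealthTaskRecordGenerator) remove boxing constructors, which always allocate and have been deprecated since Java 9; SpotBugs reports them as DM_NUMBER_CTOR. A long literal autoboxes through Long.valueOf, which serves cached instances for small values:

public class BoxedLongSketch {
  public static void main(String[] args) {
    Long viaLiteral = 10L;                                        // Long.valueOf(10) under the hood
    System.out.println(viaLiteral.equals(Long.valueOf(10)));      // true: value equality
    System.out.println(Long.valueOf(10) == Long.valueOf(10));     // true: cached in [-128, 127]
    System.out.println(Long.valueOf(1000) == Long.valueOf(1000)); // false: outside the cache
  }
}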
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestContainerDBServiceProviderImpl.java @@ -43,7 +43,7 @@ public class TestContainerDBServiceProviderImpl { @ClassRule - public static TemporaryFolder tempFolder = new TemporaryFolder(); + public static final TemporaryFolder TEMP_FOLDER = new TemporaryFolder(); private static ContainerDBServiceProvider containerDbServiceProvider; private String keyPrefix1 = "V3/B1/K1"; @@ -53,7 +53,7 @@ public class TestContainerDBServiceProviderImpl { @BeforeClass public static void setupOnce() throws Exception { ReconTestInjector reconTestInjector = - new ReconTestInjector.Builder(tempFolder) + new ReconTestInjector.Builder(TEMP_FOLDER) .withReconSqlDb() .withContainerDB() .build(); @@ -104,9 +104,10 @@ public void testInitNewContainerDB() throws Exception { "V1/B2/K3", 0); prefixCounts.put(ckp3, 3); - for (ContainerKeyPrefix prefix : prefixCounts.keySet()) { + for (Map.Entry entry : + prefixCounts.entrySet()) { containerDbServiceProvider.storeContainerKeyMapping( - prefix, prefixCounts.get(prefix)); + entry.getKey(), prefixCounts.get(entry.getKey())); } assertEquals(1, containerDbServiceProvider @@ -145,11 +146,11 @@ public void testStoreContainerKeyMapping() throws Exception { prefixCounts.put(keyPrefix2, 2); prefixCounts.put(keyPrefix3, 3); - for (String prefix : prefixCounts.keySet()) { + for (Map.Entry entry : prefixCounts.entrySet()) { ContainerKeyPrefix containerKeyPrefix = new ContainerKeyPrefix( - containerId, prefix, 0); + containerId, entry.getKey(), 0); containerDbServiceProvider.storeContainerKeyMapping( - containerKeyPrefix, prefixCounts.get(prefix)); + containerKeyPrefix, prefixCounts.get(entry.getKey())); } Assert.assertEquals(1, diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java index 92feaeda8a7..9ccd750f670 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.recon.spi.impl; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeEmptyOmMetadataManager; import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager; @@ -43,9 +44,10 @@ import java.io.BufferedWriter; import java.io.File; import java.io.FileInputStream; -import java.io.FileWriter; +import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; +import java.io.OutputStreamWriter; import java.net.HttpURLConnection; import java.nio.file.Paths; @@ -151,17 +153,27 @@ public void testGetOzoneManagerDBSnapshot() throws Exception { File file1 = Paths.get(checkpointDir.getAbsolutePath(), "file1") .toFile(); String str = "File1 Contents"; - BufferedWriter writer = new BufferedWriter(new FileWriter( - file1.getAbsolutePath())); - writer.write(str); - writer.close(); + BufferedWriter writer = null; + try { + writer = new BufferedWriter(new OutputStreamWriter( + new 
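The TestContainerDBServiceProviderImpl loops above are rewritten for SpotBugs' WMI_WRONG_MAP_ITERATOR, which flags iterating keySet() and then calling get() for every key. Iterating entrySet() yields key and value in one pass; note that entry.getValue() would also avoid the residual prefixCounts.get(entry.getKey()) lookup the patch still performs. The idiomatic form in miniature:

import java.util.HashMap;
import java.util.Map;

public class EntrySetSketch {
  public static void main(String[] args) {
    Map<String, Integer> prefixCounts = new HashMap<>();
    prefixCounts.put("V3/B1/K1", 1);
    prefixCounts.put("V3/B1/K2", 2);
    for (Map.Entry<String, Integer> entry : prefixCounts.entrySet()) {
      // No second hash lookup: the value travels with the entry.
      System.out.println(entry.getKey() + " -> " + entry.getValue());
    }
  }
}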
FileOutputStream(file1), UTF_8)); + writer.write(str); + } finally { + if (writer != null) { + writer.close(); + } + } File file2 = Paths.get(checkpointDir.getAbsolutePath(), "file2") .toFile(); str = "File2 Contents"; - writer = new BufferedWriter(new FileWriter(file2.getAbsolutePath())); - writer.write(str); - writer.close(); + try { + writer = new BufferedWriter(new OutputStreamWriter( + new FileOutputStream(file2), UTF_8)); + writer.write(str); + } finally { + writer.close(); + } //Create test tar file. File tarFile = createTarFile(checkpointDir.toPath()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java index 92c797be7fc..a90b2b7982b 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOMDBUpdatesHandler.java @@ -57,6 +57,7 @@ public class TestOMDBUpdatesHandler { public TemporaryFolder folder = new TemporaryFolder(); private OMDBDefinition omdbDefinition = new OMDBDefinition(); + private Random random = new Random(); private OzoneConfiguration createNewTestPath() throws IOException { OzoneConfiguration configuration = new OzoneConfiguration(); @@ -231,8 +232,6 @@ public void testDelete() throws Exception { public void testGetKeyType() throws IOException { OzoneConfiguration configuration = createNewTestPath(); OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration); - OMDBUpdatesHandler omdbUpdatesHandler = - new OMDBUpdatesHandler(metaMgr); assertEquals(String.class, omdbDefinition.getKeyType( metaMgr.getKeyTable().getName()).get()); @@ -244,8 +243,6 @@ public void testGetKeyType() throws IOException { public void testGetValueType() throws IOException { OzoneConfiguration configuration = createNewTestPath(); OmMetadataManagerImpl metaMgr = new OmMetadataManagerImpl(configuration); - OMDBUpdatesHandler omdbUpdatesHandler = - new OMDBUpdatesHandler(metaMgr); assertEquals(OmKeyInfo.class, omdbDefinition.getValueType( metaMgr.getKeyTable().getName()).get()); @@ -263,7 +260,7 @@ private OmKeyInfo getOmKeyInfo(String volumeName, String bucketName, .setKeyName(keyName) .setReplicationFactor(HddsProtos.ReplicationFactor.ONE) .setReplicationType(HddsProtos.ReplicationType.STAND_ALONE) - .setDataSize(new Random().nextLong()) + .setDataSize(random.nextLong()) .build(); } } \ No newline at end of file diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java index 94d76731e2f..8f7f76cd908 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java @@ -35,9 +35,7 @@ import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao; import org.jooq.DSLContext; import org.junit.Before; -import org.junit.Rule; import org.junit.Test; -import org.junit.rules.TemporaryFolder; import java.io.IOException; import java.util.ArrayList; @@ -63,9 +61,6 @@ public class TestTableCountTask extends AbstractReconSqlDBTest { private DSLContext dslContext; private boolean isSetupDone = false; - @Rule - public TemporaryFolder temporaryFolder = new TemporaryFolder(); - private void initializeInjector() throws IOException { 
ReconOMMetadataManager omMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(temporaryFolder.newFolder()), diff --git a/hadoop-ozone/s3gateway/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/s3gateway/dev-support/findbugsExcludeFile.xml new file mode 100644 index 00000000000..295329c07d7 --- /dev/null +++ b/hadoop-ozone/s3gateway/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,21 @@ + + + + + + + + + diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index 4a62fc72dce..2276775013f 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -220,6 +220,14 @@ + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + + diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index dd2c91db83d..7861bd4aad5 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -297,13 +297,13 @@ public OzoneMultipartUploadPartListParts listParts(String key, /** * Class used to hold part information in a upload part request. */ - public class Part { + public static class Part { private String partName; private byte[] content; public Part(String name, byte[] data) { this.partName = name; - this.content = data; + this.content = data.clone(); } public String getPartName() { @@ -311,7 +311,7 @@ public String getPartName() { } public byte[] getContent() { - return content; + return content.clone(); } } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java index 8dcfe591d40..071d0934522 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestSignedChunksInputStream.java @@ -20,12 +20,13 @@ import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; -import java.nio.charset.StandardCharsets; import org.apache.commons.io.IOUtils; import org.junit.Assert; import org.junit.Test; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Test input stream parsing with signatures. 
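The OzoneBucketStub.Part hunk just below adds clone() at both boundaries of the byte[]: storing or returning a caller-supplied array directly lets outside code mutate internal state, which SpotBugs reports as EI_EXPOSE_REP2 (store) and EI_EXPOSE_REP (return). Defensive copies in miniature:

public class PartSketch {
  private final byte[] content;

  PartSketch(byte[] data) {
    this.content = data.clone();   // copy in: caller keeps no alias to our state
  }

  byte[] getContent() {
    return content.clone();        // copy out: caller can't mutate our copy
  }

  public static void main(String[] args) {
    byte[] data = {1, 2, 3};
    PartSketch part = new PartSketch(data);
    data[0] = 99;                              // mutating the original array...
    System.out.println(part.getContent()[0]);  // ...prints 1, not 99
  }
}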
*/ @@ -36,14 +37,14 @@ public void emptyfile() throws IOException { InputStream is = fileContent("0;chunk-signature" + "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40"); - String result = IOUtils.toString(is, StandardCharsets.UTF_8); + String result = IOUtils.toString(is, UTF_8); Assert.assertEquals("", result); is = fileContent("0;chunk-signature" + "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r" + "\n"); - result = IOUtils.toString(is, StandardCharsets.UTF_8); + result = IOUtils.toString(is, UTF_8); Assert.assertEquals("", result); } @@ -54,7 +55,7 @@ public void singlechunk() throws IOException { + "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r" + "\n1234567890\r\n"); - String result = IOUtils.toString(is, StandardCharsets.UTF_8); + String result = IOUtils.toString(is, UTF_8); Assert.assertEquals("1234567890", result); //test read(byte[],int,int) @@ -64,7 +65,8 @@ public void singlechunk() throws IOException { + "\n1234567890\r\n"); byte[] bytes = new byte[10]; IOUtils.read(is, bytes, 0, 10); - Assert.assertEquals("1234567890", new String(bytes)); + Assert.assertEquals("1234567890", + new String(bytes, UTF_8)); } @Test @@ -74,7 +76,7 @@ public void singlechunkwithoutend() throws IOException { + "=23abb2bd920ddeeaac78a63ed808bc59fa6e7d3ef0e356474b82cdc2f8c93c40\r" + "\n1234567890"); - String result = IOUtils.toString(is, StandardCharsets.UTF_8); + String result = IOUtils.toString(is, UTF_8); Assert.assertEquals("1234567890", result); //test read(byte[],int,int) @@ -84,7 +86,8 @@ public void singlechunkwithoutend() throws IOException { + "\n1234567890"); byte[] bytes = new byte[10]; IOUtils.read(is, bytes, 0, 10); - Assert.assertEquals("1234567890", new String(bytes)); + Assert.assertEquals("1234567890", + new String(bytes, UTF_8)); } @Test @@ -94,7 +97,7 @@ public void multichunks() throws IOException { + "1234567890\r\n" + "05;chunk-signature=signature\r\n" + "abcde\r\n"); - String result = IOUtils.toString(is, StandardCharsets.UTF_8); + String result = IOUtils.toString(is, UTF_8); Assert.assertEquals("1234567890abcde", result); //test read(byte[],int,int) @@ -104,11 +107,12 @@ public void multichunks() throws IOException { + "abcde\r\n"); byte[] bytes = new byte[15]; IOUtils.read(is, bytes, 0, 15); - Assert.assertEquals("1234567890abcde", new String(bytes)); + Assert.assertEquals("1234567890abcde", + new String(bytes, UTF_8)); } private InputStream fileContent(String content) { return new SignedChunksInputStream( - new ByteArrayInputStream(content.getBytes())); + new ByteArrayInputStream(content.getBytes(UTF_8))); } } \ No newline at end of file diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java index 11a04b9264a..35c8f907871 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java @@ -39,8 +39,8 @@ */ public class TestVirtualHostStyleFilter { - private static OzoneConfiguration conf; - private static String s3HttpAddr; + private OzoneConfiguration conf; + private String s3HttpAddr; @Before public void setup() { diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java index 
8e87000b91d..9bafae5a4fc 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketGet.java @@ -314,9 +314,8 @@ public void listWithContinuationTokenFail() throws IOException { getBucket.setClient(ozoneClient); try { - ListObjectResponse getBucketResponse = - (ListObjectResponse) getBucket.list("b1", "/", null, null, 2, - "dir", null, "random", null, null, null).getEntity(); + getBucket.list("b1", "/", null, null, 2, "dir", null, "random", null, + null, null).getEntity(); fail("listWithContinuationTokenFail"); } catch (OS3Exception ex) { Assert.assertEquals("random", ex.getResource()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java index 6c41509aa20..ca6e1bdd353 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java @@ -95,8 +95,6 @@ public void testBucketFailWithAuthHeaderMissing() throws Exception { @Test public void testBucketPut() throws Exception { - String auth = generateAuthHeader(); - Response response = bucketEndpoint.put(bucketName, null); assertEquals(200, response.getStatus()); assertNotNull(response.getLocation()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java index 92fe20cf022..dcec0dfac0c 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java @@ -33,6 +33,7 @@ import java.io.ByteArrayInputStream; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -70,7 +71,8 @@ public static void setUp() throws Exception { assertEquals(200, response.getStatus()); String content = "Multipart Upload"; - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); @@ -123,7 +125,7 @@ public void testListPartsContinuation() throws Exception { @Test public void testListPartsWithUnknownUploadID() throws Exception { try { - Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, + REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, uploadID, 2, "0", null); } catch (OS3Exception ex) { Assert.assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index fc9c1b42016..7626cc1f652 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -39,6 +39,8 @@ import java.util.UUID; import 
org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; + +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -85,7 +87,8 @@ private String initiateMultipartUpload(String key) throws IOException, private Part uploadPart(String key, String uploadID, int partNumber, String content) throws IOException, OS3Exception { - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index 95c8fefb636..17911c52687 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -41,6 +41,7 @@ import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER_RANGE; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; @@ -74,7 +75,7 @@ public static void setUp() throws Exception { OzoneBucket bucket = CLIENT.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET); - byte[] keyContent = EXISTING_KEY_CONTENT.getBytes(); + byte[] keyContent = EXISTING_KEY_CONTENT.getBytes(UTF_8); try (OutputStream stream = bucket .createKey(EXISTING_KEY, keyContent.length, ReplicationType.RATIS, ReplicationFactor.THREE, new HashMap<>())) { @@ -124,7 +125,8 @@ public void testMultipart() throws Exception { OzoneBucket bucket = CLIENT.getObjectStore().getS3Bucket(OzoneConsts.S3_BUCKET); try (InputStream is = bucket.readKey(KEY)) { - String keyContent = new Scanner(is).useDelimiter("\\A").next(); + String keyContent = new Scanner(is, UTF_8.name()) + .useDelimiter("\\A").next(); Assert.assertEquals( content + EXISTING_KEY_CONTENT + EXISTING_KEY_CONTENT.substring(0, 4), keyContent); @@ -150,7 +152,8 @@ private String initiateMultipartUpload(String key) throws IOException, private Part uploadPart(String key, String uploadID, int partNumber, String content) throws IOException, OS3Exception { setHeaders(); - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, content.length(), partNumber, uploadID, body); assertEquals(200, response.getStatus()); @@ -172,7 +175,7 @@ private Part uploadPartWithCopy(String key, String uploadID, int partNumber, } setHeaders(additionalHeaders); - ByteArrayInputStream body = new ByteArrayInputStream("".getBytes()); + ByteArrayInputStream body = new ByteArrayInputStream("".getBytes(UTF_8)); Response response = REST.put(OzoneConsts.S3_BUCKET, key, 0, partNumber, uploadID, body); assertEquals(200, response.getStatus()); diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index 83e35051084..9684f9c4661 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -24,7 +24,6 @@ import javax.ws.rs.core.Response; import java.io.ByteArrayInputStream; import java.io.IOException; -import java.nio.charset.StandardCharsets; import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.ozone.client.OzoneClient; @@ -40,6 +39,7 @@ import org.junit.Test; import org.mockito.Mockito; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.COPY_SOURCE_HEADER; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.junit.Assert.assertEquals; @@ -77,7 +77,8 @@ public void setup() throws IOException { public void testPutObject() throws IOException, OS3Exception { //GIVEN HttpHeaders headers = Mockito.mock(HttpHeaders.class); - ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes()); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); objectEndpoint.setHeaders(headers); //WHEN @@ -90,7 +91,7 @@ public void testPutObject() throws IOException, OS3Exception { clientStub.getObjectStore().getS3Bucket(bucketName) .readKey(keyName); String keyContent = - IOUtils.toString(ozoneInputStream, StandardCharsets.UTF_8); + IOUtils.toString(ozoneInputStream, UTF_8); Assert.assertEquals(200, response.getStatus()); Assert.assertEquals(CONTENT, keyContent); @@ -113,14 +114,13 @@ public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { //WHEN Response response = objectEndpoint.put(bucketName, keyName, chunkedContent.length(), 1, null, - new ByteArrayInputStream(chunkedContent.getBytes())); + new ByteArrayInputStream(chunkedContent.getBytes(UTF_8))); //THEN OzoneInputStream ozoneInputStream = clientStub.getObjectStore().getS3Bucket(bucketName) .readKey(keyName); - String keyContent = - IOUtils.toString(ozoneInputStream, StandardCharsets.UTF_8); + String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); Assert.assertEquals(200, response.getStatus()); Assert.assertEquals("1234567890abcde", keyContent); @@ -130,7 +130,8 @@ public void testPutObjectWithSignedChunks() throws IOException, OS3Exception { public void testCopyObject() throws IOException, OS3Exception { // Put object in to source bucket HttpHeaders headers = Mockito.mock(HttpHeaders.class); - ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes()); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); objectEndpoint.setHeaders(headers); keyName = "sourceKey"; @@ -141,8 +142,7 @@ public void testCopyObject() throws IOException, OS3Exception { .getS3Bucket(bucketName) .readKey(keyName); - String keyContent = IOUtils.toString(ozoneInputStream, - StandardCharsets.UTF_8); + String keyContent = IOUtils.toString(ozoneInputStream, UTF_8); Assert.assertEquals(200, response.getStatus()); Assert.assertEquals(CONTENT, keyContent); @@ -159,7 +159,7 @@ public void testCopyObject() throws IOException, OS3Exception { ozoneInputStream = clientStub.getObjectStore().getS3Bucket(destBucket) .readKey(destkey); - keyContent = IOUtils.toString(ozoneInputStream, StandardCharsets.UTF_8); + keyContent = 
IOUtils.toString(ozoneInputStream, UTF_8); Assert.assertEquals(200, response.getStatus()); Assert.assertEquals(CONTENT, keyContent); @@ -220,13 +220,14 @@ public void testCopyObject() throws IOException, OS3Exception { @Test public void testInvalidStorageType() throws IOException { HttpHeaders headers = Mockito.mock(HttpHeaders.class); - ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes()); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); objectEndpoint.setHeaders(headers); keyName = "sourceKey"; when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn("random"); try { - Response response = objectEndpoint.put(bucketName, keyName, + objectEndpoint.put(bucketName, keyName, CONTENT.length(), 1, null, body); fail("testInvalidStorageType"); } catch (OS3Exception ex) { @@ -239,12 +240,13 @@ public void testInvalidStorageType() throws IOException { @Test public void testEmptyStorageType() throws IOException, OS3Exception { HttpHeaders headers = Mockito.mock(HttpHeaders.class); - ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes()); + ByteArrayInputStream body = + new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); objectEndpoint.setHeaders(headers); keyName = "sourceKey"; when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(""); - Response response = objectEndpoint.put(bucketName, keyName, CONTENT + objectEndpoint.put(bucketName, keyName, CONTENT .length(), 1, null, body); OzoneKeyDetails key = diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index 33fdb45cf7b..03696889663 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -34,6 +34,7 @@ import java.io.ByteArrayInputStream; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; +import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; @@ -77,7 +78,8 @@ public void testPartUpload() throws Exception { assertEquals(200, response.getStatus()); String content = "Multipart Upload"; - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); @@ -98,7 +100,8 @@ public void testPartUploadWithOverride() throws Exception { assertEquals(200, response.getStatus()); String content = "Multipart Upload"; - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); response = REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, uploadID, body); @@ -120,7 +123,8 @@ public void testPartUploadWithOverride() throws Exception { public void testPartUploadWithIncorrectUploadID() throws Exception { try { String content = "Multipart Upload With Incorrect uploadID"; - ByteArrayInputStream body = new ByteArrayInputStream(content.getBytes()); + ByteArrayInputStream body = + new ByteArrayInputStream(content.getBytes(UTF_8)); REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1, "random", body); 
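
The recurring getBytes(UTF_8) and new String(bytes, UTF_8) edits in the surrounding test hunks all address the same issue (likely SpotBugs' DM_DEFAULT_ENCODING detector): the no-argument overloads use the JVM's platform default charset, so test results can vary by machine. A small round-trip sketch with the charset pinned:

import static java.nio.charset.StandardCharsets.UTF_8;

public final class CharsetRoundTrip {
  public static void main(String[] args) {
    // Encoding and decoding with an explicit charset is deterministic
    // across platforms; the no-argument overloads depend on the JVM's
    // default charset and can differ between machines.
    byte[] bytes = "1234567890".getBytes(UTF_8);
    String roundTripped = new String(bytes, UTF_8);
    if (!"1234567890".equals(roundTripped)) {
      throw new AssertionError("round trip changed the content");
    }
  }
}
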
fail("testPartUploadWithIncorrectUploadID failed"); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exception.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exceptions.java similarity index 84% rename from hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exception.java rename to hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exceptions.java index fa6e2c7dfaf..e2653a52e92 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exception.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/exception/TestOS3Exceptions.java @@ -25,23 +25,23 @@ /** * This class tests OS3Exception class. */ -public class TestOS3Exception { +public class TestOS3Exceptions { @Test - public void testOS3Exception() { + public void testOS3Exceptions() { OS3Exception ex = new OS3Exception("AccessDenied", "Access Denied", 403); String requestId = OzoneUtils.getRequestID(); ex = S3ErrorTable.newError(ex, "bucket"); ex.setRequestId(requestId); String val = ex.toXml(); - String formatString = "\n" + - "\n" + - " %s\n" + - " %s\n" + - " %s\n" + - " %s\n" + - "\n"; + String formatString = "%n" + + "%n" + + " %s%n" + + " %s%n" + + " %s%n" + + " %s%n" + + "%n"; String expected = String.format(formatString, ex.getCode(), ex.getErrorMessage(), ex.getResource(), ex.getRequestId()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java index 5ca1c452241..ce80333aaa7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/header/TestAuthorizationHeaderV4.java @@ -65,7 +65,7 @@ public void testV4HeaderMissingParts() { String auth = "AWS4-HMAC-SHA256 " + "Credential=ozone/" + curDate + "/us-east-1/s3/aws4_request, " + "SignedHeaders=host;range;x-amz-date,"; - AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth); + new AuthorizationHeaderV4(auth); fail("Exception is expected in case of malformed header"); } catch (OS3Exception ex) { assertEquals("AuthorizationHeaderMalformed", ex.getCode()); @@ -79,7 +79,7 @@ public void testV4HeaderInvalidCredential() { "Credential=" + curDate + "/us-east-1/s3/aws4_request, " + "SignedHeaders=host;range;x-amz-date, " + "Signature=fe5f80f77d5fa3beca038a248ff027"; - AuthorizationHeaderV4 v4 = new AuthorizationHeaderV4(auth); + new AuthorizationHeaderV4(auth); fail("Exception is expected in case of malformed header"); } catch (OS3Exception ex) { assertEquals("AuthorizationHeaderMalformed", ex.getCode()); diff --git a/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml index e6a345eea55..76127b7f06f 100644 --- a/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml +++ b/hadoop-ozone/tools/dev-support/findbugsExcludeFile.xml @@ -13,7 +13,21 @@ limitations under the License. See accompanying LICENSE file. 
--> - - - + + + + + + + + + + + + + + + + + diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java index 31864e2f104..be7203d70d7 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java @@ -37,10 +37,13 @@ import java.io.File; import java.io.IOException; import java.io.PrintStream; +import java.io.UnsupportedEncodingException; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Tests AuditParser. */ @@ -53,6 +56,7 @@ public class TestAuditParser { private final ByteArrayOutputStream err = new ByteArrayOutputStream(); private static final PrintStream OLD_OUT = System.out; private static final PrintStream OLD_ERR = System.err; + private static final String DEFAULT_CODING = UTF_8.name(); private static String dbName; private static final String LOGS = TestAuditParser.class .getClassLoader().getResource("testaudit.log").getPath(); @@ -73,9 +77,9 @@ public static void init() throws Exception { } @Before - public void setup() { - System.setOut(new PrintStream(OUT)); - System.setErr(new PrintStream(err)); + public void setup() throws UnsupportedEncodingException { + System.setOut(new PrintStream(OUT, false, DEFAULT_CODING)); + System.setErr(new PrintStream(err, false, DEFAULT_CODING)); } @After @@ -118,7 +122,10 @@ public List handleExecutionException(ExecutionException ex, }; cmd.parseWithHandlers(new CommandLine.RunLast(), exceptionHandler, args); - Assert.assertTrue(OUT.toString().contains(msg)); + try { + Assert.assertTrue(OUT.toString(DEFAULT_CODING).contains(msg)); + } catch (UnsupportedEncodingException ignored) { + } } /** diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java index c3e79fd15a9..2a5223f4ff0 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java @@ -28,6 +28,9 @@ import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.io.UnsupportedEncodingException; + +import static java.nio.charset.StandardCharsets.UTF_8; /** * Tests the ozone getconf command. 
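
The TestAuditParser hunk above, like TestGetConfOptions below, captures System.out through a ByteArrayOutputStream; both the PrintStream constructor and toString() are given an explicit encoding so the capture does not depend on the platform default. A condensed sketch of the capture-and-restore cycle:

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;

import static java.nio.charset.StandardCharsets.UTF_8;

public final class StdoutCaptureSketch {
  public static void main(String[] args) throws UnsupportedEncodingException {
    PrintStream original = System.out;
    ByteArrayOutputStream bout = new ByteArrayOutputStream();
    // Redirect stdout; the PrintStream encodes with UTF-8, and the
    // matching toString(encoding) below decodes with the same charset.
    System.setOut(new PrintStream(bout, false, UTF_8.name()));
    try {
      System.out.println("hello");
      System.out.flush(); // autoFlush is false, so flush before reading
      String captured = bout.toString(UTF_8.name());
      if (!captured.startsWith("hello")) {
        throw new AssertionError("unexpected capture: " + captured);
      }
    } finally {
      System.setOut(original); // always restore the real stdout
    }
  }
}
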
@@ -36,16 +39,17 @@ public class TestGetConfOptions { private static OzoneConfiguration conf; private static ByteArrayOutputStream bout; private static PrintStream psBackup; + private static final String DEFAULT_ENCODING = UTF_8.name(); @BeforeClass - public static void init() { + public static void init() throws UnsupportedEncodingException { conf = new OzoneConfiguration(); conf.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, "1"); conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, "service1"); conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost"); psBackup = System.out; bout = new ByteArrayOutputStream(); - PrintStream psOut = new PrintStream(bout); + PrintStream psOut = new PrintStream(bout, false, DEFAULT_ENCODING); System.setOut(psOut); } @@ -60,31 +64,34 @@ public static void tearDown(){ } @Test - public void testGetConfWithTheOptionConfKey() { + public void testGetConfWithTheOptionConfKey() + throws UnsupportedEncodingException { new OzoneGetConf(conf) .run(new String[] {"-confKey", ScmConfigKeys.OZONE_SCM_NAMES}); - Assert.assertEquals("localhost\n", bout.toString()); + Assert.assertEquals("localhost\n", bout.toString(DEFAULT_ENCODING)); bout.reset(); new OzoneGetConf(conf) .run(new String[] {"confKey", OMConfigKeys.OZONE_OM_NODE_ID_KEY}); - Assert.assertEquals("1\n", bout.toString()); + Assert.assertEquals("1\n", bout.toString(DEFAULT_ENCODING)); } @Test - public void testGetConfWithTheOptionStorageContainerManagers() { + public void testGetConfWithTheOptionStorageContainerManagers() + throws UnsupportedEncodingException { new OzoneGetConf(conf).run(new String[] {"-storagecontainermanagers"}); - Assert.assertEquals("localhost\n", bout.toString()); + Assert.assertEquals("localhost\n", bout.toString(DEFAULT_ENCODING)); bout.reset(); new OzoneGetConf(conf).run(new String[] {"storagecontainermanagers"}); - Assert.assertEquals("localhost\n", bout.toString()); + Assert.assertEquals("localhost\n", bout.toString(DEFAULT_ENCODING)); } @Test - public void testGetConfWithTheOptionOzoneManagers() { + public void testGetConfWithTheOptionOzoneManagers() + throws UnsupportedEncodingException { new OzoneGetConf(conf).run(new String[] {"-ozonemanagers"}); - Assert.assertEquals("{service1=[]}\n", bout.toString()); + Assert.assertEquals("{service1=[]}\n", bout.toString(DEFAULT_ENCODING)); bout.reset(); new OzoneGetConf(conf).run(new String[] {"ozonemanagers"}); - Assert.assertEquals("{service1=[]}\n", bout.toString()); + Assert.assertEquals("{service1=[]}\n", bout.toString(DEFAULT_ENCODING)); } } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java index d61be3a42dc..e8de083f04a 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java @@ -39,7 +39,6 @@ public void writeWrite() throws IOException { @Test public void writeWithSmallerBuffers() throws IOException { ContentGenerator generator = new ContentGenerator(10000, 1024, 3); - ByteArrayOutputStream output = new ByteArrayOutputStream(); ByteArrayOutputStream baos = new ByteArrayOutputStream(); generator.write(baos); diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java index e520190e4c9..8a66a144320 100644 
--- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/genconf/TestGenerateOzoneRequiredConfigurations.java @@ -40,11 +40,14 @@ import java.io.File; import java.io.IOException; import java.io.PrintStream; +import java.io.UnsupportedEncodingException; import java.net.URL; import java.util.ArrayList; import java.util.Arrays; import java.util.List; +import static java.nio.charset.StandardCharsets.UTF_8; + /** * Tests GenerateOzoneRequiredConfigurations. */ @@ -57,6 +60,7 @@ public class TestGenerateOzoneRequiredConfigurations { private final ByteArrayOutputStream err = new ByteArrayOutputStream(); private static final PrintStream OLD_OUT = System.out; private static final PrintStream OLD_ERR = System.err; + private static final String DEFAULT_ENCODING = UTF_8.name(); /** * Creates output directory which will be used by the test-cases. * If a test-case needs a separate directory, it has to create a random @@ -73,8 +77,8 @@ public static void init() throws Exception { @Before public void setup() throws Exception { - System.setOut(new PrintStream(out)); - System.setErr(new PrintStream(err)); + System.setOut(new PrintStream(out, false, DEFAULT_ENCODING)); + System.setErr(new PrintStream(err, false, DEFAULT_ENCODING)); } @After @@ -96,7 +100,8 @@ public static void cleanup() throws IOException { FileUtils.deleteDirectory(outputBaseDir); } - private void execute(String[] args, String msg) { + private void execute(String[] args, String msg) + throws UnsupportedEncodingException { List arguments = new ArrayList(Arrays.asList(args)); LOG.info("Executing shell command with args {}", arguments); CommandLine cmd = genconfTool.getCmd(); @@ -117,7 +122,7 @@ public List handleExecutionException(ExecutionException ex, }; cmd.parseWithHandlers(new CommandLine.RunLast(), exceptionHandler, args); - Assert.assertTrue(out.toString().contains(msg)); + Assert.assertTrue(out.toString(DEFAULT_ENCODING).contains(msg)); } private void executeWithException(String[] args, String msg) { diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java index e62ba472af2..ac1f7fd61e5 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/test/OzoneTestDriver.java @@ -50,7 +50,9 @@ public void run(String[] args) { e.printStackTrace(); } - System.exit(exitCode); + if (exitCode != 0) { + throw new RuntimeException(); + } } public static void main(String[] args){ diff --git a/pom.xml b/pom.xml index 828e1e82871..d7707dfa7b6 100644 --- a/pom.xml +++ b/pom.xml @@ -1727,6 +1727,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs 1024 true + true
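
The OzoneTestDriver change above replaces System.exit with an exception on a nonzero exit code. SpotBugs discourages System.exit outside a main method (DM_EXIT) because it terminates the whole JVM; in a test helper that would take the test runner down with it, whereas an exception is something the calling test can observe and assert on. A minimal sketch of the idea (the helper name is illustrative):

public final class ExitCodeCheck {
  private ExitCodeCheck() { }

  // Instead of System.exit(exitCode), surface failure as an exception
  // that the caller (e.g. a JUnit test) can catch or let fail the test,
  // without killing the JVM that hosts the test runner.
  static void checkExitCode(int exitCode) {
    if (exitCode != 0) {
      throw new RuntimeException("command failed with exit code " + exitCode);
    }
  }
}
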