From 2f16baaa98248b06cc20d34008e4b7e932aa9006 Mon Sep 17 00:00:00 2001
From: Kaijie Chen
Date: Fri, 10 Mar 2023 21:52:41 +0800
Subject: [PATCH] [#708] test: do not assume hostname of hdfs mini-cluster (#709)

### What changes were proposed in this pull request?

Do not assume the hostname in the HDFS URL.
Also, let exceptions propagate in `ShuffleHdfsStorageUtilsTest` so that failures report the full stack trace.

### Why are the changes needed?

Fix #708

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

Tested manually.
---
 .../apache/uniffle/common/KerberizedHdfs.java |  2 +-
 .../server/ShuffleFlushManagerTest.java       |  2 +-
 .../apache/uniffle/storage/HdfsTestBase.java  |  2 +-
 .../util/ShuffleHdfsStorageUtilsTest.java     | 38 ++++++++-----------
 4 files changed, 18 insertions(+), 26 deletions(-)

diff --git a/common/src/test/java/org/apache/uniffle/common/KerberizedHdfs.java b/common/src/test/java/org/apache/uniffle/common/KerberizedHdfs.java
index dc1f19ad4e..38f551b527 100644
--- a/common/src/test/java/org/apache/uniffle/common/KerberizedHdfs.java
+++ b/common/src/test/java/org/apache/uniffle/common/KerberizedHdfs.java
@@ -259,7 +259,7 @@ private List<Integer> findAvailablePorts(int num) throws IOException {
   }
 
   public String getSchemeAndAuthorityPrefix() {
-    return String.format("hdfs://localhost:%s/", kerberizedDfsCluster.getNameNodePort());
+    return kerberizedDfsCluster.getURI().toString() + "/";
   }
 
   public Configuration getConf() throws IOException {
diff --git a/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerTest.java b/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerTest.java
index ea0e236e57..7ccaed9bfc 100644
--- a/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerTest.java
+++ b/server/src/test/java/org/apache/uniffle/server/ShuffleFlushManagerTest.java
@@ -153,7 +153,7 @@ public void writeTest() throws Exception {
     StorageManager storageManager =
         StorageManagerFactory.getInstance().createStorageManager(shuffleServerConf);
     storageManager.registerRemoteStorage(appId, remoteStorage);
-    String storageHost = "localhost";
+    String storageHost = cluster.getURI().getHost();
     assertEquals(0.0, ShuffleServerMetrics.counterRemoteStorageTotalWrite.get(storageHost).get(), 0.5);
     assertEquals(0.0, ShuffleServerMetrics.counterRemoteStorageRetryWrite.get(storageHost).get(), 0.5);
     assertEquals(0.0, ShuffleServerMetrics.counterRemoteStorageFailedWrite.get(storageHost).get(), 0.5);
diff --git a/storage/src/test/java/org/apache/uniffle/storage/HdfsTestBase.java b/storage/src/test/java/org/apache/uniffle/storage/HdfsTestBase.java
index df8d26d228..acab2e8349 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/HdfsTestBase.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/HdfsTestBase.java
@@ -47,7 +47,7 @@ public static void setUpHdfs(@TempDir File tempDir) throws Exception {
 
     conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, baseDir.getAbsolutePath());
     cluster = (new MiniDFSCluster.Builder(conf)).build();
-    HDFS_URI = "hdfs://localhost:" + cluster.getNameNodePort() + "/";
+    HDFS_URI = cluster.getURI().toString() + "/";
     fs = (new Path(HDFS_URI)).getFileSystem(conf);
   }
 
diff --git a/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleHdfsStorageUtilsTest.java b/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleHdfsStorageUtilsTest.java
index dff02cff7e..bc68f0688e 100644
--- a/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleHdfsStorageUtilsTest.java
+++ b/storage/src/test/java/org/apache/uniffle/storage/util/ShuffleHdfsStorageUtilsTest.java
@@ -32,7 +32,6 @@
 import org.apache.uniffle.storage.handler.impl.HdfsFileWriter;
 
 import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
 
 public class ShuffleHdfsStorageUtilsTest extends HdfsTestBase {
 
@@ -46,27 +45,20 @@ public static void createAndRunCases(
       FileSystem fileSystem,
       String clusterPathPrefix,
       Configuration hadoopConf) throws Exception {
-    FileOutputStream fileOut = null;
-    DataOutputStream dataOut = null;
-    try {
-      File file = new File(tempDir, "test");
-      fileOut = new FileOutputStream(file);
-      dataOut = new DataOutputStream(fileOut);
-      byte[] buf = new byte[2096];
-      new Random().nextBytes(buf);
-      dataOut.write(buf);
-      dataOut.close();
-      fileOut.close();
-      String path = clusterPathPrefix + "test";
-      HdfsFileWriter writer = new HdfsFileWriter(fileSystem, new Path(path), hadoopConf);
-      long size = ShuffleStorageUtils.uploadFile(file, writer, 1024);
-      assertEquals(2096, size);
-      size = ShuffleStorageUtils.uploadFile(file, writer, 100);
-      assertEquals(2096, size);
-      writer.close();
-    } catch (Exception e) {
-      e.printStackTrace();
-      fail();
-    }
+    File file = new File(tempDir, "test");
+    FileOutputStream fileOut = new FileOutputStream(file);
+    DataOutputStream dataOut = new DataOutputStream(fileOut);
+    byte[] buf = new byte[2096];
+    new Random().nextBytes(buf);
+    dataOut.write(buf);
+    dataOut.close();
+    fileOut.close();
+    String path = clusterPathPrefix + "test";
+    HdfsFileWriter writer = new HdfsFileWriter(fileSystem, new Path(path), hadoopConf);
+    long size = ShuffleStorageUtils.uploadFile(file, writer, 1024);
+    assertEquals(2096, size);
+    size = ShuffleStorageUtils.uploadFile(file, writer, 100);
+    assertEquals(2096, size);
+    writer.close();
   }
 }
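
---

For reference, the sketch below shows the pattern this patch adopts in isolation. It is illustrative only and not part of the change: the class name and printed output are hypothetical, and it assumes the Hadoop HDFS test artifacts (which provide `MiniDFSCluster`) are on the classpath. The idea is to derive the filesystem URI from the running `MiniDFSCluster` via `getURI()` rather than hardcoding `hdfs://localhost:<port>/`, which breaks when the mini-cluster's NameNode binds to a hostname other than `localhost`.

```java
import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

// Hypothetical example class, not part of this patch.
public class MiniDfsUriExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
        new File(System.getProperty("java.io.tmpdir"), "minidfs").getAbsolutePath());
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    try {
      // Fragile: assumes the NameNode is reachable as "localhost".
      String assumed = "hdfs://localhost:" + cluster.getNameNodePort() + "/";

      // Robust: ask the running cluster for its actual scheme and authority,
      // as KerberizedHdfs and HdfsTestBase do after this patch.
      String actual = cluster.getURI().toString() + "/";

      // The two agree only when the mini-cluster happens to bind to localhost.
      System.out.println("assumed = " + assumed);
      System.out.println("actual  = " + actual);

      // The derived URI always resolves to the running filesystem.
      FileSystem fs = new Path(actual).getFileSystem(conf);
      System.out.println("fs uri  = " + fs.getUri());
    } finally {
      cluster.shutdown();
    }
  }
}
```

The same reasoning applies to the metrics assertion in `ShuffleFlushManagerTest`: the metric is keyed by the storage host, so the test looks it up with `cluster.getURI().getHost()` instead of the literal `"localhost"`.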