From 00f6892c6532d6a2479d266143cf313587e33023 Mon Sep 17 00:00:00 2001
From: smallzhongfeng
Date: Wed, 7 Sep 2022 20:24:56 +0800
Subject: [PATCH] add config to doc

---
 .../apache/uniffle/coordinator/CoordinatorConf.java   | 12 ++++++------
 .../LowestIOSampleCostSelectStorageStrategy.java      |  6 +++---
 .../LowestIOSampleCostSelectStorageStrategyTest.java  |  2 +-
 docs/coordinator_guide.md                             |  4 ++++
 .../org/apache/uniffle/test/FetchClientConfTest.java  |  2 +-
 5 files changed, 15 insertions(+), 11 deletions(-)

diff --git a/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java b/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java
index 239c978f21..28931a425a 100644
--- a/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java
+++ b/coordinator/src/main/java/org/apache/uniffle/coordinator/CoordinatorConf.java
@@ -134,18 +134,18 @@ public class CoordinatorConf extends RssBaseConf {
       .enumType(ApplicationManager.StrategyName.class)
       .defaultValue(APP_BALANCE)
       .withDescription("Strategy for selecting the remote path");
-  public static final ConfigOption<Long> COORDINATOR_REMOTE_STORAGE_HEALTH_SCHEDULE_TIME = ConfigOptions
-      .key("rss.coordinator.remote.storage.health.schedule.time")
+  public static final ConfigOption<Long> COORDINATOR_REMOTE_STORAGE_IO_SAMPLE_SCHEDULE_TIME = ConfigOptions
+      .key("rss.coordinator.remote.storage.io.sample.schedule.time")
       .longType()
       .defaultValue(60 * 1000L)
       .withDescription("The time of scheduling the read and write time of the paths to obtain different HDFS");
-  public static final ConfigOption<Integer> COORDINATOR_REMOTE_STORAGE_FILE_SIZE = ConfigOptions
-      .key("rss.coordinator.remote.storage.file.size")
+  public static final ConfigOption<Integer> COORDINATOR_REMOTE_STORAGE_IO_SAMPLE_FILE_SIZE = ConfigOptions
+      .key("rss.coordinator.remote.storage.io.sample.file.size")
       .intType()
       .defaultValue(204800 * 1000)
       .withDescription("The size of the file that the scheduled thread reads and writes");
-  public static final ConfigOption<Integer> COORDINATOR_REMOTE_STORAGE_ACCESS_TIMES = ConfigOptions
-      .key("rss.coordinator.remote.storage.access.times")
+  public static final ConfigOption<Integer> COORDINATOR_REMOTE_STORAGE_IO_SAMPLE_ACCESS_TIMES = ConfigOptions
+      .key("rss.coordinator.remote.storage.io.sample.access.times")
       .intType()
       .defaultValue(3)
       .withDescription("The number of times to read and write HDFS files");
diff --git a/coordinator/src/main/java/org/apache/uniffle/coordinator/LowestIOSampleCostSelectStorageStrategy.java b/coordinator/src/main/java/org/apache/uniffle/coordinator/LowestIOSampleCostSelectStorageStrategy.java
index 21c419671a..96147b8e69 100644
--- a/coordinator/src/main/java/org/apache/uniffle/coordinator/LowestIOSampleCostSelectStorageStrategy.java
+++ b/coordinator/src/main/java/org/apache/uniffle/coordinator/LowestIOSampleCostSelectStorageStrategy.java
@@ -71,8 +71,8 @@ public class LowestIOSampleCostSelectStorageStrategy implements SelectStorageStr
 
   public LowestIOSampleCostSelectStorageStrategy(CoordinatorConf cf) {
     conf = new Configuration();
-    fileSize = cf.getInteger(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_FILE_SIZE);
-    readAndWriteTimes = cf.getInteger(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_ACCESS_TIMES);
+    fileSize = cf.getInteger(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_IO_SAMPLE_FILE_SIZE);
+    readAndWriteTimes = cf.getInteger(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_IO_SAMPLE_ACCESS_TIMES);
     this.appIdToRemoteStorageInfo = Maps.newConcurrentMap();
     this.remoteStoragePathRankValue = Maps.newConcurrentMap();
     this.availableRemoteStorageInfo = Maps.newHashMap();
@@ -81,7 +81,7 @@ public LowestIOSampleCostSelectStorageStrategy(CoordinatorConf cf) {
         ThreadUtils.getThreadFactory("readWriteRankScheduler-%d"));
     // should init later than the refreshRemoteStorage init
     readWriteRankScheduler.scheduleAtFixedRate(this::checkReadAndWrite, 1000,
-        cf.getLong(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_HEALTH_SCHEDULE_TIME), TimeUnit.MILLISECONDS);
+        cf.getLong(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_IO_SAMPLE_SCHEDULE_TIME), TimeUnit.MILLISECONDS);
   }
 
   public void checkReadAndWrite() {
diff --git a/coordinator/src/test/java/org/apache/uniffle/coordinator/LowestIOSampleCostSelectStorageStrategyTest.java b/coordinator/src/test/java/org/apache/uniffle/coordinator/LowestIOSampleCostSelectStorageStrategyTest.java
index 9786db46f7..41eec3e758 100644
--- a/coordinator/src/test/java/org/apache/uniffle/coordinator/LowestIOSampleCostSelectStorageStrategyTest.java
+++ b/coordinator/src/test/java/org/apache/uniffle/coordinator/LowestIOSampleCostSelectStorageStrategyTest.java
@@ -77,7 +77,7 @@ public void setUpHdfs(String hdfsPath) throws Exception {
     Thread.sleep(500L);
     CoordinatorConf conf = new CoordinatorConf();
     conf.set(CoordinatorConf.COORDINATOR_APP_EXPIRED, appExpiredTime);
-    conf.setLong(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_HEALTH_SCHEDULE_TIME, 5000);
+    conf.setLong(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_IO_SAMPLE_SCHEDULE_TIME, 5000);
     conf.set(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_SELECT_STRATEGY, IO_SAMPLE);
     applicationManager = new ApplicationManager(conf);
     selectStorageStrategy = (LowestIOSampleCostSelectStorageStrategy) applicationManager.getSelectStorageStrategy();
diff --git a/docs/coordinator_guide.md b/docs/coordinator_guide.md
index cd71326d70..f9c263a27d 100644
--- a/docs/coordinator_guide.md
+++ b/docs/coordinator_guide.md
@@ -95,6 +95,10 @@ This document will introduce how to deploy Uniffle coordinators.
 |rss.coordinator.remote.storage.cluster.conf|-|Remote Storage Cluster related conf with format $clusterId,$key=$value, separated by ';'|
 |rss.rpc.server.port|-|RPC port for coordinator|
 |rss.jetty.http.port|-|Http port for coordinator|
+|rss.coordinator.remote.storage.select.strategy|APP_BALANCE|Strategy for selecting the remote path|
+|rss.coordinator.remote.storage.io.sample.schedule.time|60000|The time of scheduling the read and write time of the paths to obtain different HDFS|
+|rss.coordinator.remote.storage.io.sample.file.size|204800000|The size of the file that the scheduled thread reads and writes|
+|rss.coordinator.remote.storage.io.sample.access.times|3|The number of times to read and write HDFS files|
 
 ### AccessClusterLoadChecker settings
 |Property Name|Default| Description|
diff --git a/integration-test/common/src/test/java/org/apache/uniffle/test/FetchClientConfTest.java b/integration-test/common/src/test/java/org/apache/uniffle/test/FetchClientConfTest.java
index 88cfa07c13..73450d7deb 100644
--- a/integration-test/common/src/test/java/org/apache/uniffle/test/FetchClientConfTest.java
+++ b/integration-test/common/src/test/java/org/apache/uniffle/test/FetchClientConfTest.java
@@ -158,7 +158,7 @@ public void testFetchRemoteStorageByIO(@TempDir File tempDir) throws Exception {
     coordinatorConf.setBoolean(CoordinatorConf.COORDINATOR_DYNAMIC_CLIENT_CONF_ENABLED, true);
     coordinatorConf.setString(CoordinatorConf.COORDINATOR_DYNAMIC_CLIENT_CONF_PATH, cfgFile.toURI().toString());
     coordinatorConf.setInteger(CoordinatorConf.COORDINATOR_DYNAMIC_CLIENT_CONF_UPDATE_INTERVAL_SEC, 2);
-    coordinatorConf.setLong(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_HEALTH_SCHEDULE_TIME, 500);
+    coordinatorConf.setLong(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_IO_SAMPLE_SCHEDULE_TIME, 500);
     coordinatorConf.set(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_SELECT_STRATEGY, IO_SAMPLE);
     createCoordinatorServer(coordinatorConf);
     startServers();
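For anyone trying the renamed options locally, a minimal sketch is shown below. It only mirrors the setter calls already used in the tests touched by this patch (conf.set, setLong, setInteger); the class name IoSampleConfExample and the chosen values are illustrative, and IO_SAMPLE is assumed to be a value of ApplicationManager.StrategyName, as the static import in the tests suggests.

    import org.apache.uniffle.coordinator.ApplicationManager;
    import org.apache.uniffle.coordinator.CoordinatorConf;

    // Illustrative sketch (not part of this patch): sets the renamed IO-sample
    // options programmatically, following the same calls used in the tests above.
    public class IoSampleConfExample {
      public static void main(String[] args) {
        CoordinatorConf conf = new CoordinatorConf();
        // Switch from the default APP_BALANCE strategy to IO sampling.
        conf.set(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_SELECT_STRATEGY,
            ApplicationManager.StrategyName.IO_SAMPLE);
        // Sample read/write cost every 5 seconds instead of the default 60000 ms.
        conf.setLong(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_IO_SAMPLE_SCHEDULE_TIME, 5000L);
        // Use a smaller probe file and three probes per round for a quick local check.
        conf.setInteger(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_IO_SAMPLE_FILE_SIZE, 1024 * 1024);
        conf.setInteger(CoordinatorConf.COORDINATOR_REMOTE_STORAGE_IO_SAMPLE_ACCESS_TIMES, 3);
      }
    }

The constants above resolve to the same rss.coordinator.remote.storage.io.sample.* keys documented in the new docs/coordinator_guide.md rows, so the table and the code paths stay consistent.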