diff --git a/core/src/main/scala/org/apache/spark/internal/config/Deploy.scala b/core/src/main/scala/org/apache/spark/internal/config/Deploy.scala index ceab957b36634..d494c5ec019c7 100644 --- a/core/src/main/scala/org/apache/spark/internal/config/Deploy.scala +++ b/core/src/main/scala/org/apache/spark/internal/config/Deploy.scala @@ -19,48 +19,59 @@ package org.apache.spark.internal.config private[spark] object Deploy { val RECOVERY_MODE = ConfigBuilder("spark.deploy.recoveryMode") + .version("0.8.1") .stringConf .createWithDefault("NONE") val RECOVERY_MODE_FACTORY = ConfigBuilder("spark.deploy.recoveryMode.factory") + .version("1.2.0") .stringConf .createWithDefault("") val RECOVERY_DIRECTORY = ConfigBuilder("spark.deploy.recoveryDirectory") + .version("0.8.1") .stringConf .createWithDefault("") val ZOOKEEPER_URL = ConfigBuilder("spark.deploy.zookeeper.url") .doc(s"When `${RECOVERY_MODE.key}` is set to ZOOKEEPER, this " + "configuration is used to set the zookeeper URL to connect to.") + .version("0.8.1") .stringConf .createOptional val ZOOKEEPER_DIRECTORY = ConfigBuilder("spark.deploy.zookeeper.dir") + .version("0.8.1") .stringConf .createOptional val RETAINED_APPLICATIONS = ConfigBuilder("spark.deploy.retainedApplications") + .version("0.8.0") .intConf .createWithDefault(200) val RETAINED_DRIVERS = ConfigBuilder("spark.deploy.retainedDrivers") + .version("1.1.0") .intConf .createWithDefault(200) val REAPER_ITERATIONS = ConfigBuilder("spark.dead.worker.persistence") + .version("0.8.0") .intConf .createWithDefault(15) val MAX_EXECUTOR_RETRIES = ConfigBuilder("spark.deploy.maxExecutorRetries") + .version("1.6.3") .intConf .createWithDefault(10) val SPREAD_OUT_APPS = ConfigBuilder("spark.deploy.spreadOut") + .version("0.6.1") .booleanConf .createWithDefault(true) val DEFAULT_CORES = ConfigBuilder("spark.deploy.defaultCores") + .version("0.9.0") .intConf .createWithDefault(Int.MaxValue) diff --git a/docs/configuration.md b/docs/configuration.md index 
2421e00f39068..86ec35b411578 100644 --- a/docs/configuration.md +++ b/docs/configuration.md @@ -2597,22 +2597,25 @@ Spark subsystems. ### Deploy - + + + +
-  <tr><th>Property Name</th><th>Default</th><th>Meaning</th></tr>
+  <tr><th>Property Name</th><th>Default</th><th>Meaning</th><th>Since Version</th></tr>
   <tr>
     <td><code>spark.deploy.recoveryMode</code></td>
     <td>NONE</td>
     <td>The recovery mode setting to recover submitted Spark jobs with cluster mode when it failed and relaunches.
     This is only applicable for cluster mode when running with Standalone or Mesos.</td>
+    <td>0.8.1</td>
   </tr>
   <tr>
     <td><code>spark.deploy.zookeeper.url</code></td>
     <td>None</td>
     <td>When `spark.deploy.recoveryMode` is set to ZOOKEEPER, this configuration is used to set the zookeeper URL to connect to.</td>
+    <td>0.8.1</td>
   </tr>
   <tr>
     <td><code>spark.deploy.zookeeper.dir</code></td>
     <td>None</td>
     <td>When `spark.deploy.recoveryMode` is set to ZOOKEEPER, this configuration is used to set the zookeeper directory to store recovery state.</td>
+    <td>0.8.1</td>
   </tr>