Skip to content

Commit

Permalink
Addressed more comments
Browse files Browse the repository at this point in the history
  • Loading branch information
liyinan926 committed Nov 28, 2017
1 parent c386186 commit b85cfc4
Show file tree
Hide file tree
Showing 3 changed files with 19 additions and 19 deletions.
2 changes: 1 addition & 1 deletion docs/configuration.md
Original file line number Diff line number Diff line change
Expand Up @@ -1397,7 +1397,7 @@ Apart from these, the following properties are also available, and may be useful
</tr>
<tr>
<td><code>spark.scheduler.minRegisteredResourcesRatio</code></td>
<td>2.3.0 for KUBERNETES mode; 0.8 for YARN mode; 0.0 for standalone mode and Mesos coarse-grained mode</td>
<td>0.8 for KUBERNETES mode; 0.8 for YARN mode; 0.0 for standalone mode and Mesos coarse-grained mode</td>
<td>
The minimum ratio of registered resources (registered resources / total expected resources)
(resources are executors in yarn mode and Kubernetes mode, CPU cores in standalone mode and Mesos coarse-grained
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,16 +24,16 @@ private[spark] object Config extends Logging {

val KUBERNETES_NAMESPACE =
ConfigBuilder("spark.kubernetes.namespace")
.doc("The namespace that will be used for running the driver and executor pods. When using" +
" spark-submit in cluster mode, this can also be passed to spark-submit via the" +
" --kubernetes-namespace command line argument.")
.doc("The namespace that will be used for running the driver and executor pods. When using " +
"spark-submit in cluster mode, this can also be passed to spark-submit via the " +
"--kubernetes-namespace command line argument.")
.stringConf
.createWithDefault("default")

val EXECUTOR_DOCKER_IMAGE =
ConfigBuilder("spark.kubernetes.executor.docker.image")
.doc("Docker image to use for the executors. Specify this using the standard Docker tag" +
" format.")
.doc("Docker image to use for the executors. Specify this using the standard Docker tag " +
"format.")
.stringConf
.createOptional

Expand All @@ -56,10 +56,10 @@ private[spark] object Config extends Logging {

val KUBERNETES_SERVICE_ACCOUNT_NAME =
ConfigBuilder(s"$APISERVER_AUTH_DRIVER_CONF_PREFIX.serviceAccountName")
.doc("Service account that is used when running the driver pod. The driver pod uses" +
" this service account when requesting executor pods from the API server. If specific" +
" credentials are given for the driver pod to use, the driver will favor" +
" using those credentials instead.")
.doc("Service account that is used when running the driver pod. The driver pod uses " +
"this service account when requesting executor pods from the API server. If specific " +
"credentials are given for the driver pod to use, the driver will favor " +
"using those credentials instead.")
.stringConf
.createOptional

Expand All @@ -68,9 +68,9 @@ private[spark] object Config extends Logging {
// based on the executor memory.
val KUBERNETES_EXECUTOR_MEMORY_OVERHEAD =
ConfigBuilder("spark.kubernetes.executor.memoryOverhead")
.doc("The amount of off-heap memory (in megabytes) to be allocated per executor. This" +
" is memory that accounts for things like VM overheads, interned strings, other native" +
" overheads, etc. This tends to grow with the executor size. (typically 6-10%).")
.doc("The amount of off-heap memory (in megabytes) to be allocated per executor. This " +
"is memory that accounts for things like VM overheads, interned strings, other native " +
"overheads, etc. This tends to grow with the executor size. (typically 6-10%).")
.bytesConf(ByteUnit.MiB)
.createOptional

Expand Down Expand Up @@ -117,7 +117,7 @@ private[spark] object Config extends Logging {
.intConf
.checkValue(value => value > 0, "Maximum attempts of checks of executor lost reason " +
"must be a positive integer")
.createWithDefault(5)
.createWithDefault(10)

val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
}
Original file line number Diff line number Diff line change
Expand Up @@ -344,9 +344,9 @@ private[spark] class KubernetesClusterSchedulerBackend(
podsWithKnownExitReasons.put(pod.getMetadata.getName, executorExitReason)

if (!disconnectedPodsByExecutorIdPendingRemoval.containsKey(executorId)) {
log.warn(s"Executor with id $executorId was not marked as disconnected, but the" +
s" watch received an event of type $action for this executor. The executor may" +
" have failed to start in the first place and never registered with the driver.")
log.warn(s"Executor with id $executorId was not marked as disconnected, but the " +
s"watch received an event of type $action for this executor. The executor may " +
"have failed to start in the first place and never registered with the driver.")
}
disconnectedPodsByExecutorIdPendingRemoval.put(executorId, pod)

Expand Down Expand Up @@ -388,8 +388,8 @@ private[spark] class KubernetesClusterSchedulerBackend(
// container was probably actively killed by the driver.
if (isPodAlreadyReleased(pod)) {
ExecutorExited(containerExitStatus, exitCausedByApp = false,
s"Container in pod ${pod.getMetadata.getName} exited from explicit termination" +
" request.")
s"Container in pod ${pod.getMetadata.getName} exited from explicit termination " +
"request.")
} else {
val containerExitReason = s"Pod ${pod.getMetadata.getName}'s executor container " +
s"exited with exit status code $containerExitStatus."
Expand Down

0 comments on commit b85cfc4

Please sign in to comment.