diff --git a/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala b/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala
index c9d5a6aee501c..460c932db42bd 100644
--- a/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala
+++ b/core/src/main/scala/org/apache/spark/ui/exec/ExecutorsPage.scala
@@ -69,7 +69,7 @@ private[ui] class ExecutorsPage(
}
private def listingExecTable(storageStatusList: Seq[StorageStatus], isActive: Boolean)
- : Seq[Node] = {
+ : Seq[Node] = {
val maxMem = storageStatusList.map(_.maxMem).sum
val memUsed = storageStatusList.map(_.memUsed).sum
val diskUsed = storageStatusList.map(_.diskUsed).sum
@@ -77,7 +77,7 @@ private[ui] class ExecutorsPage(
ExecutorsPage.getExecInfo(listener, statusId, isActive)
val execInfoSorted = execInfo.sortBy(_.id)
val logsExist = execInfo.filter(_.executorLogs.nonEmpty).nonEmpty
- val isShowThreadDump = threadDumpEnabled && isActive
+ val shouldShowThreadDump = threadDumpEnabled && isActive
// scalastyle:off
@@ -115,10 +115,10 @@ private[ui] class ExecutorsPage(
{if (logsExist)
Logs | else Seq.empty}
- {if (isShowThreadDump) Thread Dump | else Seq.empty}
+ {if (shouldShowThreadDump) Thread Dump | else Seq.empty}
- {execInfoSorted.map(execRow(_, logsExist, isShowThreadDump))}
+ {execInfoSorted.map(execRow(_, logsExist, shouldShowThreadDump))}
@@ -127,8 +127,8 @@ private[ui] class ExecutorsPage(
}
/** Render an HTML row representing an executor */
- private def execRow(info: ExecutorSummary, logsExist: Boolean, isShowThreadDump: Boolean)
- : Seq[Node] = {
+ private def execRow(info: ExecutorSummary, logsExist: Boolean, shouldShowThreadDump: Boolean)
+ : Seq[Node] = {
val maximumMemory = info.maxMemory
val memoryUsed = info.memoryUsed
val diskUsed = info.diskUsed
diff --git a/docs/configuration.md b/docs/configuration.md
index 741d6b2b37a87..00b9a52e058fa 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -565,6 +565,13 @@ Apart from these, the following properties are also available, and may be useful
How many finished batches the Spark UI and status APIs remember before garbage collecting.
+
+<tr>
+  <td><code>spark.ui.retainedDeadExecutors</code></td>
+  <td>100</td>
+  <td>
+    How many dead executors the Spark UI and status APIs remember before garbage collecting.
+  </td>
+</tr>
+
#### Compression and Serialization