SPARK-5217 Spark UI should report pending stages during job execution on AllStagesPage.
ScrapCodes committed Jan 19, 2015
1 parent 1955645 commit c19d82a
Showing 2 changed files with 16 additions and 1 deletion.
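In short, the change makes JobProgressListener remember stages that a job-start event has announced but that have not yet been submitted, and AllStagesPage renders them in a new "Pending Stages" table. Below is a minimal, self-contained sketch of that bookkeeping for illustration only; PendingStageTracker and the stripped-down StageInfo are stand-ins, not the actual Spark classes, whose real changes appear in the diff that follows.

import scala.collection.mutable.HashMap

// Illustrative stand-in for Spark's StageInfo; only the fields needed here.
case class StageInfo(stageId: Int, name: String)

class PendingStageTracker {
  // Mirrors the new `pendingStages` map added to JobProgressListener:
  // stages announced by a job start that have not yet been submitted.
  val pendingStages = new HashMap[Int, StageInfo]

  // On job start: every stage the job plans to run begins as pending.
  def onJobStart(stageInfos: Seq[StageInfo]): Unit =
    stageInfos.foreach(info => pendingStages(info.stageId) = info)

  // On stage submitted: a stage that actually starts is no longer pending.
  def onStageSubmitted(info: StageInfo): Unit =
    pendingStages.remove(info.stageId)

  // On job end: any stages that never ran (e.g. skipped) are dropped so
  // they do not linger on the AllStagesPage.
  def onJobEnd(stageIds: Seq[Int]): Unit =
    stageIds.foreach(pendingStages.remove)
}

object PendingStageTrackerDemo extends App {
  val tracker = new PendingStageTracker
  val stages = Seq(StageInfo(0, "map"), StageInfo(1, "reduce"))

  tracker.onJobStart(stages)
  println(s"pending after job start: ${tracker.pendingStages.keySet}")        // Set(0, 1)

  tracker.onStageSubmitted(stages.head)
  println(s"pending after stage 0 submits: ${tracker.pendingStages.keySet}")  // Set(1)

  tracker.onJobEnd(stages.map(_.stageId))
  println(s"pending after job end: ${tracker.pendingStages.keySet}")          // Set()
}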
core/src/main/scala/org/apache/spark/ui/jobs/AllStagesPage.scala (12 additions, 0 deletions)
@@ -37,12 +37,18 @@ private[ui] class AllStagesPage(parent: StagesTab) extends WebUIPage("") {
val numCompletedStages = listener.numCompletedStages
val failedStages = listener.failedStages.reverse.toSeq
val numFailedStages = listener.numFailedStages
val pendingStages = listener.pendingStages.values.toSeq
val numWaitingStages = pendingStages.size
val now = System.currentTimeMillis

val activeStagesTable =
new StageTableBase(activeStages.sortBy(_.submissionTime).reverse,
parent.basePath, parent.listener, isFairScheduler = parent.isFairScheduler,
killEnabled = parent.killEnabled)
val pendingStagesTable =
new StageTableBase(pendingStages.sortBy(_.submissionTime).reverse,
parent.basePath, parent.listener, isFairScheduler = parent.isFairScheduler,
killEnabled = false)
val completedStagesTable =
new StageTableBase(completedStages.sortBy(_.submissionTime).reverse, parent.basePath,
parent.listener, isFairScheduler = parent.isFairScheduler, killEnabled = false)
@@ -68,6 +74,10 @@ private[ui] class AllStagesPage(parent: StagesTab) extends WebUIPage("") {
<strong>Scheduling Mode: </strong>
{listener.schedulingMode.map(_.toString).getOrElse("Unknown")}
</li>
<li>
<a href="#pending"><strong>Pending Stages:</strong></a>
{pendingStages.size}
</li>
<li>
<a href="#active"><strong>Active Stages:</strong></a>
{activeStages.size}
@@ -89,6 +99,8 @@ private[ui] class AllStagesPage(parent: StagesTab) extends WebUIPage("") {
} else {
Seq[Node]()
}} ++
<h4 id="pending">Pending Stages ({pendingStages.size})</h4> ++
pendingStagesTable.toNodeSeq ++
<h4 id="active">Active Stages ({activeStages.size})</h4> ++
activeStagesTable.toNodeSeq ++
<h4 id="completed">Completed Stages ({numCompletedStages})</h4> ++
core/src/main/scala/org/apache/spark/ui/jobs/JobProgressListener.scala (4 additions, 1 deletion)
@@ -56,6 +56,7 @@ class JobProgressListener(conf: SparkConf) extends SparkListener with Logging {
val jobIdToData = new HashMap[JobId, JobUIData]

// Stages:
val pendingStages = new HashMap[StageId, StageInfo]
val activeStages = new HashMap[StageId, StageInfo]
val completedStages = ListBuffer[StageInfo]()
val skippedStages = ListBuffer[StageInfo]()
@@ -157,6 +158,7 @@ class JobProgressListener(conf: SparkConf) extends SparkListener with Logging {
stageIds = jobStart.stageIds,
jobGroup = jobGroup,
status = JobExecutionStatus.RUNNING)
jobStart.stageInfos.foreach(x => pendingStages(x.stageId) = x)
// Compute (a potential underestimate of) the number of tasks that will be run by this job.
// This may be an underestimate because the job start event references all of the result
// stages' transitive stage dependencies, but some of these stages might be skipped if their
@@ -187,6 +189,7 @@ class JobProgressListener(conf: SparkConf) extends SparkListener with Logging {
}
jobData.completionTime = Option(jobEnd.time).filter(_ >= 0)

jobData.stageIds.foreach(pendingStages.remove)
jobEnd.jobResult match {
case JobSucceeded =>
completedJobs += jobData
@@ -257,7 +260,7 @@ class JobProgressListener(conf: SparkConf) extends SparkListener with Logging {
override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted) = synchronized {
val stage = stageSubmitted.stageInfo
activeStages(stage.stageId) = stage

pendingStages.remove(stage.stageId)
val poolName = Option(stageSubmitted.properties).map {
p => p.getProperty("spark.scheduler.pool", DEFAULT_POOL_NAME)
}.getOrElse(DEFAULT_POOL_NAME)
