@@ -336,8 +336,8 @@ private[spark] class Executor(
     extends Runnable {
 
     val taskId = taskDescription.taskId
-    val threadName = s"Executor task launch worker for task $taskId"
     val taskName = taskDescription.name
+    val threadName = s"Executor task launch worker for $taskName"
     val mdcProperties = taskDescription.properties.asScala
       .filter(_._1.startsWith("mdc.")).toSeq
 
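The hunks in this commit drop the explicit "(TID $taskId)" suffix from the thread name and log lines because, presumably, taskDescription.name itself already carries the TID. A minimal sketch of that assumption (the stub type and the example name string are hypothetical illustrations, not taken from the Spark source):

```scala
// Sketch only: assumes the task name already embeds the TID, which is why
// appending "(TID $taskId)" again would be redundant.
object TaskNameSketch {
  // Hypothetical stand-in for the fields the diff reads from TaskDescription.
  final case class TaskDescriptionStub(taskId: Long, name: String)

  def main(args: Array[String]): Unit = {
    val taskDescription = TaskDescriptionStub(
      taskId = 4L,
      name = "task 1.0 in stage 0.0 (TID 4)") // name already carries the TID

    val taskName = taskDescription.name
    // After the change, the thread name no longer repeats the TID:
    val threadName = s"Executor task launch worker for $taskName"
    println(threadName) // Executor task launch worker for task 1.0 in stage 0.0 (TID 4)
  }
}
```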
@@ -364,7 +364,7 @@ private[spark] class Executor(
     @volatile var task: Task[Any] = _
 
     def kill(interruptThread: Boolean, reason: String): Unit = {
-      logInfo(s"Executor is trying to kill $taskName (TID $taskId), reason: $reason")
+      logInfo(s"Executor is trying to kill $taskName, reason: $reason")
       reasonIfKilled = Some(reason)
       if (task != null) {
         synchronized {
@@ -425,7 +425,7 @@ private[spark] class Executor(
       } else 0L
       Thread.currentThread.setContextClassLoader(replClassLoader)
       val ser = env.closureSerializer.newInstance()
-      logInfo(s"Running $taskName (TID $taskId)")
+      logInfo(s"Running $taskName")
       execBackend.statusUpdate(taskId, TaskState.RUNNING, EMPTY_BYTE_BUFFER)
       var taskStartTimeNs: Long = 0
       var taskStartCpu: Long = 0
@@ -459,7 +459,7 @@ private[spark] class Executor(
         // MapOutputTrackerMaster and its cache invalidation is not based on epoch numbers so
         // we don't need to make any special calls here.
         if (!isLocal) {
-          logDebug("Task " + taskId + "'s epoch is " + task.epoch)
+          logDebug(s"$taskName's epoch is ${task.epoch}")
           env.mapOutputTracker.asInstanceOf[MapOutputTrackerWorker].updateEpoch(task.epoch)
         }
 
@@ -485,7 +485,7 @@ private[spark] class Executor(
         val freedMemory = taskMemoryManager.cleanUpAllAllocatedMemory()
 
         if (freedMemory > 0 && !threwException) {
-          val errMsg = s"Managed memory leak detected; size = $freedMemory bytes, TID = $taskId"
+          val errMsg = s"Managed memory leak detected; size = $freedMemory bytes, $taskName"
           if (conf.get(UNSAFE_EXCEPTION_ON_MEMORY_LEAK)) {
             throw new SparkException(errMsg)
           } else {
@@ -495,7 +495,7 @@ private[spark] class Executor(
 
         if (releasedLocks.nonEmpty && !threwException) {
           val errMsg =
-            s"${releasedLocks.size} block locks were not released by TID = $taskId:\n" +
+            s"${releasedLocks.size} block locks were not released by $taskName\n" +
               releasedLocks.mkString("[", ", ", "]")
           if (conf.get(STORAGE_EXCEPTION_PIN_LEAK)) {
             throw new SparkException(errMsg)
@@ -508,7 +508,7 @@ private[spark] class Executor(
           // uh-oh. it appears the user code has caught the fetch-failure without throwing any
           // other exceptions. It's *possible* this is what the user meant to do (though highly
           // unlikely). So we will log an error and keep going.
-          logError(s"TID ${taskId} completed successfully though internally it encountered " +
+          logError(s"$taskName completed successfully though internally it encountered " +
            s"unrecoverable fetch failures! Most likely this means user code is incorrectly " +
            s"swallowing Spark's internal ${classOf[FetchFailedException]}", fetchFailure)
         }
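For context, the error above describes user code that catches a shuffle-fetch failure and keeps going, so the task appears to succeed even though Spark recorded a fetch failure internally. A minimal, self-contained sketch of that anti-pattern (the exception class and function here are hypothetical stand-ins, not Spark APIs):

```scala
// Illustrative sketch only: a blanket catch inside task code hides the fetch
// failure from Spark, which is exactly what the log message warns about.
object SwallowedFetchFailureSketch {
  // Hypothetical stand-in for Spark's FetchFailedException.
  class FakeFetchFailedException(msg: String) extends Exception(msg)

  def readShuffleBlock(): Int =
    throw new FakeFetchFailedException("missing map output")

  def main(args: Array[String]): Unit = {
    val result =
      try {
        readShuffleBlock()
      } catch {
        // Swallowing the failure hides it from Spark's error handling.
        case _: Exception => -1
      }
    println(s"task finished with $result, fetch failure was hidden")
  }
}
```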
@@ -592,7 +592,7 @@ private[spark] class Executor(
         // directSend = sending directly back to the driver
         val serializedResult: ByteBuffer = {
           if (maxResultSize > 0 && resultSize > maxResultSize) {
-            logWarning(s"Finished $taskName (TID $taskId). Result is larger than maxResultSize " +
+            logWarning(s"Finished $taskName. Result is larger than maxResultSize " +
              s"(${Utils.bytesToString(resultSize)} > ${Utils.bytesToString(maxResultSize)}), " +
              s"dropping it.")
             ser.serialize(new IndirectTaskResult[Any](TaskResultBlockId(taskId), resultSize))
@@ -602,11 +602,10 @@ private[spark] class Executor(
               blockId,
               new ChunkedByteBuffer(serializedDirectResult.duplicate()),
               StorageLevel.MEMORY_AND_DISK_SER)
-            logInfo(
-              s"Finished $taskName (TID $taskId). $resultSize bytes result sent via BlockManager)")
+            logInfo(s"Finished $taskName. $resultSize bytes result sent via BlockManager)")
             ser.serialize(new IndirectTaskResult[Any](blockId, resultSize))
           } else {
-            logInfo(s"Finished $taskName (TID $taskId). $resultSize bytes result sent to driver")
+            logInfo(s"Finished $taskName. $resultSize bytes result sent to driver")
             serializedDirectResult
           }
         }
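The surrounding branches route the task result in one of three ways: drop it when it exceeds the driver's maxResultSize, ship it indirectly through the block manager when it is too large to send inline (a direct-result size threshold, assumed here as maxDirectResultSize, is presumed to govern this branch), or send the serialized bytes straight back to the driver. A hedged sketch of that decision, not the Spark implementation:

```scala
// Sketch of the result-routing logic visible in this hunk; names are assumptions.
object ResultRoutingSketch {
  sealed trait Route
  case object DroppedTooLarge extends Route // only the size is reported, result discarded
  case object ViaBlockManager extends Route // indirect: driver fetches it from the block manager
  case object DirectToDriver extends Route  // serialized result sent inline with the status update

  def route(resultSize: Long, maxResultSize: Long, maxDirectResultSize: Long): Route =
    if (maxResultSize > 0 && resultSize > maxResultSize) DroppedTooLarge
    else if (resultSize > maxDirectResultSize) ViaBlockManager
    else DirectToDriver

  def main(args: Array[String]): Unit = {
    println(route(resultSize = 2L << 30, maxResultSize = 1L << 30, maxDirectResultSize = 1L << 20))
    println(route(resultSize = 5L << 20, maxResultSize = 1L << 30, maxDirectResultSize = 1L << 20))
    println(route(resultSize = 10L, maxResultSize = 1L << 30, maxDirectResultSize = 1L << 20))
  }
}
```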
@@ -616,7 +615,7 @@ private[spark] class Executor(
         execBackend.statusUpdate(taskId, TaskState.FINISHED, serializedResult)
       } catch {
         case t: TaskKilledException =>
-          logInfo(s"Executor killed $taskName (TID $taskId), reason: ${t.reason}")
+          logInfo(s"Executor killed $taskName, reason: ${t.reason}")
 
           val (accums, accUpdates) = collectAccumulatorsAndResetStatusOnFailure(taskStartTimeNs)
           // Here and below, put task metric peaks in a WrappedArray to expose them as a Seq
@@ -629,7 +628,7 @@ private[spark] class Executor(
         case _: InterruptedException | NonFatal(_) if
             task != null && task.reasonIfKilled.isDefined =>
           val killReason = task.reasonIfKilled.getOrElse("unknown reason")
-          logInfo(s"Executor interrupted and killed $taskName (TID $taskId), reason: $killReason")
+          logInfo(s"Executor interrupted and killed $taskName, reason: $killReason")
 
           val (accums, accUpdates) = collectAccumulatorsAndResetStatusOnFailure(taskStartTimeNs)
           val metricPeaks = WrappedArray.make(metricsPoller.getTaskMetricPeaks(taskId))
@@ -643,7 +642,7 @@ private[spark] class Executor(
           // there was a fetch failure in the task, but some user code wrapped that exception
           // and threw something else. Regardless, we treat it as a fetch failure.
           val fetchFailedCls = classOf[FetchFailedException].getName
-          logWarning(s"TID ${taskId} encountered a ${fetchFailedCls} and " +
+          logWarning(s"$taskName encountered a ${fetchFailedCls} and " +
            s"failed, but the ${fetchFailedCls} was hidden by another " +
            s"exception. Spark is handling this like a fetch failure and ignoring the " +
            s"other exception: $t")
@@ -659,13 +658,13 @@ private[spark] class Executor(
         case t: Throwable if env.isStopped =>
           // Log the expected exception after executor.stop without stack traces
           // see: SPARK-19147
-          logError(s"Exception in $taskName (TID $taskId): ${t.getMessage}")
+          logError(s"Exception in $taskName: ${t.getMessage}")
 
         case t: Throwable =>
           // Attempt to exit cleanly by informing the driver of our failure.
           // If anything goes wrong (or this was a fatal exception), we will delegate to
           // the default uncaught exception handler, which will terminate the Executor.
-          logError(s"Exception in $taskName (TID $taskId)", t)
+          logError(s"Exception in $taskName", t)
 
           // SPARK-20904: Do not report failure to driver if it happened during shut down. Because
           // libraries may set up shutdown hooks that race with running tasks during shutdown,