Skip to content

Commit

Permalink
[SPARK-26873][SQL] Use a consistent timestamp to build Hadoop Job IDs.
Browse files Browse the repository at this point in the history
## What changes were proposed in this pull request?

Backport SPARK-26873 (#23777) to branch-2.3.

## How was this patch tested?

Existing tests cover regressions.

Closes #23832 from rdblue/SPARK-26873-branch-2.3.

Authored-by: Ryan Blue <blue@apache.org>
Signed-off-by: Marcelo Vanzin <vanzin@cloudera.com>
  • Loading branch information
rdblue authored and Marcelo Vanzin committed Feb 19, 2019
1 parent 214b6b2 commit 41df43f
Showing 1 changed file with 4 additions and 1 deletion.
Expand Up @@ -190,12 +190,14 @@ object FileFormatWriter extends Logging {
global = false,
child = plan).execute()
}
val jobIdInstant = new Date().getTime
val ret = new Array[WriteTaskResult](rdd.partitions.length)
sparkSession.sparkContext.runJob(
rdd,
(taskContext: TaskContext, iter: Iterator[InternalRow]) => {
executeTask(
description = description,
jobIdInstant = jobIdInstant,
sparkStageId = taskContext.stageId(),
sparkPartitionId = taskContext.partitionId(),
sparkAttemptNumber = taskContext.taskAttemptId().toInt & Integer.MAX_VALUE,
Expand Down Expand Up @@ -228,13 +230,14 @@ object FileFormatWriter extends Logging {
/** Writes data out in a single Spark task. */
private def executeTask(
description: WriteJobDescription,
jobIdInstant: Long,
sparkStageId: Int,
sparkPartitionId: Int,
sparkAttemptNumber: Int,
committer: FileCommitProtocol,
iterator: Iterator[InternalRow]): WriteTaskResult = {

val jobId = SparkHadoopWriterUtils.createJobID(new Date, sparkStageId)
val jobId = SparkHadoopWriterUtils.createJobID(new Date(jobIdInstant), sparkStageId)
val taskId = new TaskID(jobId, TaskType.MAP, sparkPartitionId)
val taskAttemptId = new TaskAttemptID(taskId, sparkAttemptNumber)

Expand Down

0 comments on commit 41df43f

Please sign in to comment.