From a7990db6238ce0a21f64492eaf15ec1b9c278e13 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=BF=8D=E5=86=AC?=
Date: Fri, 13 Apr 2018 10:37:37 +0800
Subject: [PATCH] fix when numExecutorsTarget equals maxNumExecutors

---
 .../scala/org/apache/spark/ExecutorAllocationManager.scala | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
index 189d91333c045..bbbfea42642c6 100644
--- a/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
+++ b/core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
@@ -368,7 +368,7 @@ private[spark] class ExecutorAllocationManager(
    */
   private def addExecutors(maxNumExecutorsNeeded: Int): Int = {
     // Do not request more executors if it would put our target over the upper bound
-    if (numExecutorsTarget >= maxNumExecutors) {
+    if (numExecutorsTarget > maxNumExecutors) {
       logDebug(s"Not adding executors because our current target total " +
         s"is already $numExecutorsTarget (limit $maxNumExecutors)")
       numExecutorsToAdd = 1
@@ -390,7 +390,7 @@

     // If our target has not changed, do not send a message
     // to the cluster manager and reset our exponential growth
-    if (delta == 0) {
+    if (delta == 0 && numExecutorsTarget != maxNumExecutors) {
       // Check if there is any speculative jobs pending
       if (listener.pendingTasks == 0 && listener.pendingSpeculativeTasks > 0) {
         numExecutorsTarget =