From f7c7f4f8b6fe7ed55c1aed5494311ba23844d019 Mon Sep 17 00:00:00 2001
From: Shiqi Sun
Date: Sat, 24 Oct 2020 09:55:57 -0700
Subject: [PATCH] [SPARK-30821][K8S] Handle executor failure with multiple
 containers

Handle executor failure with multiple containers

Added a Spark property, spark.kubernetes.executor.checkAllContainers, defaulting to false. When it is true and the pod restart policy is "Never", the executor snapshot takes all containers in the executor pod into consideration when deciding whether the executor is in the "Running" state. The new property is also added to the docs.

### What changes were proposed in this pull request?
Check all containers in the executor pod when reporting executor status, if the `spark.kubernetes.executor.checkAllContainers` property is set to true.

### Why are the changes needed?
Currently, a pod remains "running" as long as at least one of its containers is running. This prevents Spark from noticing when a container has failed in an executor pod with multiple containers. With this change, users can configure the behavior to be different: if any container in the executor pod has failed, whether the executor process or one of its sidecars, the pod is considered failed and the executor is rescheduled.

### Does this PR introduce _any_ user-facing change?
Yes, a new Spark property is added. Users can now choose whether to turn this feature on via the `spark.kubernetes.executor.checkAllContainers` property (see the usage sketch after the patch).

### How was this patch tested?
Unit tests were added and all passed. I tried to run the integration tests by following the instructions [here](https://spark.apache.org/developer-tools.html) (section "Testing K8S") and also [here](https://github.com/apache/spark/blob/master/resource-managers/kubernetes/integration-tests/README.md), but I wasn't able to run them smoothly, as they fail to talk to the minikube cluster. Maybe my minikube version is too new (I'm using v1.13.1)? Since I've been trying for two days and still can't make it work, I decided to submit this PR, and hopefully the Jenkins tests will pass.

Closes #29924 from huskysun/exec-sidecar-failure.

Authored-by: Shiqi Sun
Signed-off-by: Holden Karau
(cherry picked from commit f65952772702f0a8772c93b79f562f35c337f5a5)
Signed-off-by: Holden Karau
---
 docs/running-on-kubernetes.md                 |  8 +++
 .../org/apache/spark/deploy/k8s/Config.scala  |  8 +++
 .../cluster/k8s/ExecutorPodsSnapshot.scala    | 16 +++++-
 .../k8s/KubernetesClusterManager.scala        |  3 +
 ...erministicExecutorPodsSnapshotsStore.scala |  2 +
 .../k8s/ExecutorLifecycleTestUtils.scala      | 32 ++++++++++-
 .../k8s/ExecutorPodsSnapshotSuite.scala       | 56 +++++++++++++------
 .../k8s/ExecutorPodsSnapshotsStoreSuite.scala |  1 +
 8 files changed, 108 insertions(+), 18 deletions(-)

diff --git a/docs/running-on-kubernetes.md b/docs/running-on-kubernetes.md
index e6a000de1ab08..f49f953938391 100644
--- a/docs/running-on-kubernetes.md
+++ b/docs/running-on-kubernetes.md
@@ -1157,6 +1157,14 @@ See the [configuration page](configuration.html) for information on Spark config
   3.0.0
+
+  spark.kubernetes.executor.checkAllContainers
+  false
+
+  Specify whether all containers (including sidecars) or only the executor container should be checked when determining the pod status.
+
+  3.1.0
+
  spark.kubernetes.submission.connectionTimeout
  10000

diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala
index 22f4c7505c09b..b08875bdd3420 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala
@@ -409,6 +409,14 @@ private[spark] object Config extends Logging {
     .stringConf
     .createOptional
 
+  val KUBERNETES_EXECUTOR_CHECK_ALL_CONTAINERS =
+    ConfigBuilder("spark.kubernetes.executor.checkAllContainers")
+      .doc("If set to true, all containers in the executor pod will be checked when reporting " +
+        "executor status.")
+      .version("3.1.0")
+      .booleanConf
+      .createWithDefault(false)
+
   val KUBERNETES_DRIVER_LABEL_PREFIX = "spark.kubernetes.driver.label."
   val KUBERNETES_DRIVER_ANNOTATION_PREFIX = "spark.kubernetes.driver.annotation."
   val KUBERNETES_DRIVER_SERVICE_ANNOTATION_PREFIX = "spark.kubernetes.driver.service.annotation."

diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshot.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshot.scala
index 435a5f1461c92..bd8da1080111f 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshot.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshot.scala
@@ -18,6 +18,7 @@ package org.apache.spark.scheduler.cluster.k8s
 
 import java.util.Locale
 
+import io.fabric8.kubernetes.api.model.ContainerStateTerminated
 import io.fabric8.kubernetes.api.model.Pod
 
 import org.apache.spark.deploy.k8s.Constants._
@@ -37,6 +38,7 @@ private[spark] case class ExecutorPodsSnapshot(executorPods: Map[Long, ExecutorP
 }
 
 object ExecutorPodsSnapshot extends Logging {
+  private var shouldCheckAllContainers: Boolean = _
 
   def apply(executorPods: Seq[Pod]): ExecutorPodsSnapshot = {
     ExecutorPodsSnapshot(toStatesByExecutorId(executorPods))
@@ -44,6 +46,10 @@ object ExecutorPodsSnapshot extends Logging {
 
   def apply(): ExecutorPodsSnapshot = ExecutorPodsSnapshot(Map.empty[Long, ExecutorPodState])
 
+  def setShouldCheckAllContainers(watchAllContainers: Boolean): Unit = {
+    shouldCheckAllContainers = watchAllContainers
+  }
+
   private def toStatesByExecutorId(executorPods: Seq[Pod]): Map[Long, ExecutorPodState] = {
     executorPods.map { pod =>
       (pod.getMetadata.getLabels.get(SPARK_EXECUTOR_ID_LABEL).toLong, toState(pod))
@@ -59,7 +65,15 @@ object ExecutorPodsSnapshot extends Logging {
       case "pending" =>
         PodPending(pod)
       case "running" =>
-        PodRunning(pod)
+        if (shouldCheckAllContainers &&
+          "Never" == pod.getSpec.getRestartPolicy &&
+          pod.getStatus.getContainerStatuses.stream
+            .map[ContainerStateTerminated](cs => cs.getState.getTerminated)
+            .anyMatch(t => t != null && t.getExitCode != 0)) {
+          PodFailed(pod)
+        } else {
+          PodRunning(pod)
+        }
       case "failed" =>
         PodFailed(pod)
       case "succeeded" =>

diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterManager.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterManager.scala
index b9d7a7083f41a..f2638129cd786 100644
--- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterManager.scala
+++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/scheduler/cluster/k8s/KubernetesClusterManager.scala
@@ -91,10 +91,13 @@ private[spark] class KubernetesClusterManager extends ExternalClusterManager wit
     val schedulerExecutorService = ThreadUtils.newDaemonSingleThreadScheduledExecutor(
       "kubernetes-executor-maintenance")
 
+    ExecutorPodsSnapshot.setShouldCheckAllContainers(
+      sc.conf.get(KUBERNETES_EXECUTOR_CHECK_ALL_CONTAINERS))
     val subscribersExecutor = ThreadUtils
       .newDaemonThreadPoolScheduledExecutor(
         "kubernetes-executor-snapshots-subscribers", 2)
     val snapshotsStore = new ExecutorPodsSnapshotsStoreImpl(subscribersExecutor)
+
     val removedExecutorsCache = CacheBuilder.newBuilder()
       .expireAfterWrite(3, TimeUnit.MINUTES)
       .build[java.lang.Long, java.lang.Long]()

diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/DeterministicExecutorPodsSnapshotsStore.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/DeterministicExecutorPodsSnapshotsStore.scala
index 9ac7e0222054a..e64797213f33f 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/DeterministicExecutorPodsSnapshotsStore.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/DeterministicExecutorPodsSnapshotsStore.scala
@@ -21,6 +21,8 @@ import scala.collection.mutable
 
 class DeterministicExecutorPodsSnapshotsStore extends ExecutorPodsSnapshotsStore {
 
+  ExecutorPodsSnapshot.setShouldCheckAllContainers(false)
+
   private val snapshotsBuffer = mutable.Buffer.empty[ExecutorPodsSnapshot]
   private val subscribers = mutable.Buffer.empty[Seq[ExecutorPodsSnapshot] => Unit]
 
diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorLifecycleTestUtils.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorLifecycleTestUtils.scala
index 2e883623a4b1c..30312d27b59ed 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorLifecycleTestUtils.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorLifecycleTestUtils.scala
@@ -71,6 +71,33 @@ object ExecutorLifecycleTestUtils {
       .build()
   }
 
+  /**
+   * [SPARK-30821]
+   * This creates a pod with one container in a running state and one container in a failed
+   * state (terminated with a non-zero exit code). This pod is used for unit-testing the
+   * spark.kubernetes.executor.checkAllContainers Spark conf.
+   */
+  def runningExecutorWithFailedContainer(executorId: Long): Pod = {
+    new PodBuilder(podWithAttachedContainerForId(executorId))
+      .editOrNewStatus()
+        .withPhase("running")
+        .addNewContainerStatus()
+          .withNewState()
+            .withNewTerminated()
+              .withExitCode(1)
+            .endTerminated()
+          .endState()
+        .endContainerStatus()
+        .addNewContainerStatus()
+          .withNewState()
+            .withNewRunning()
+            .endRunning()
+          .endState()
+        .endContainerStatus()
+      .endStatus()
+      .build()
+  }
+
   def succeededExecutor(executorId: Long): Pod = {
     new PodBuilder(podWithAttachedContainerForId(executorId))
       .editOrNewStatus()
@@ -112,7 +139,10 @@ object ExecutorLifecycleTestUtils {
       .addToLabels(SPARK_APP_ID_LABEL, TEST_SPARK_APP_ID)
       .addToLabels(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE)
       .addToLabels(SPARK_EXECUTOR_ID_LABEL, executorId.toString)
-      .endMetadata()
+      .endMetadata()
+      .editOrNewSpec()
+        .withRestartPolicy("Never")
+        .endSpec()
       .build()
     val container = new ContainerBuilder()
       .withName("spark-executor")

diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotSuite.scala
index 70e19c904eddb..96ad6b5ab32a5 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotSuite.scala
@@ -16,31 +16,55 @@
  */
 package org.apache.spark.scheduler.cluster.k8s
 
+import io.fabric8.kubernetes.api.model.Pod
+
 import org.apache.spark.SparkFunSuite
 import org.apache.spark.scheduler.cluster.k8s.ExecutorLifecycleTestUtils._
 
 class ExecutorPodsSnapshotSuite extends SparkFunSuite {
 
+  def testCase(pod: Pod, state: Pod => ExecutorPodState): (Pod, ExecutorPodState) =
+    (pod, state(pod))
+
+  def doTest(testCases: Seq[(Pod, ExecutorPodState)]): Unit = {
+    val snapshot = ExecutorPodsSnapshot(testCases.map(_._1))
+    for (((_, state), i) <- testCases.zipWithIndex) {
+      assertResult(state.getClass.getName, s"executor ID $i") {
+        snapshot.executorPods(i).getClass.getName
+      }
+    }
+  }
+
   test("States are interpreted correctly from pod metadata.") {
-    val pods = Seq(
-      pendingExecutor(0),
-      runningExecutor(1),
-      succeededExecutor(2),
-      failedExecutorWithoutDeletion(3),
-      deletedExecutor(4),
-      unknownExecutor(5))
-    val snapshot = ExecutorPodsSnapshot(pods)
-    assert(snapshot.executorPods ===
-      Map(
-        0L -> PodPending(pods(0)),
-        1L -> PodRunning(pods(1)),
-        2L -> PodSucceeded(pods(2)),
-        3L -> PodFailed(pods(3)),
-        4L -> PodDeleted(pods(4)),
-        5L -> PodUnknown(pods(5))))
+    ExecutorPodsSnapshot.setShouldCheckAllContainers(false)
+    val testCases = Seq(
+      testCase(pendingExecutor(0), PodPending),
+      testCase(runningExecutor(1), PodRunning),
+      testCase(succeededExecutor(2), PodSucceeded),
+      testCase(failedExecutorWithoutDeletion(3), PodFailed),
+      testCase(deletedExecutor(4), PodDeleted),
+      testCase(unknownExecutor(5), PodUnknown)
+    )
+    doTest(testCases)
+  }
+
+  test("SPARK-30821: States are interpreted correctly from pod metadata" +
+    " when configured to check all containers.") {
+    ExecutorPodsSnapshot.setShouldCheckAllContainers(true)
+    val testCases = Seq(
+      testCase(pendingExecutor(0), PodPending),
+      testCase(runningExecutor(1), PodRunning),
+      testCase(runningExecutorWithFailedContainer(2), PodFailed),
+      testCase(succeededExecutor(3), PodSucceeded),
+      testCase(failedExecutorWithoutDeletion(4), PodFailed),
+      testCase(deletedExecutor(5), PodDeleted),
+      testCase(unknownExecutor(6), PodUnknown)
+    )
+    doTest(testCases)
   }
 
   test("Updates add new pods for non-matching ids and edit existing pods for matching ids") {
+    ExecutorPodsSnapshot.setShouldCheckAllContainers(false)
     val originalPods = Seq(
       pendingExecutor(0),
       runningExecutor(1))

diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotsStoreSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotsStoreSuite.scala
index cf54b3c4eb329..614c198bd9caf 100644
--- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotsStoreSuite.scala
+++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsSnapshotsStoreSuite.scala
@@ -35,6 +35,7 @@ class ExecutorPodsSnapshotsStoreSuite extends SparkFunSuite with BeforeAndAfter
   before {
     eventBufferScheduler = new DeterministicScheduler()
     eventQueueUnderTest = new ExecutorPodsSnapshotsStoreImpl(eventBufferScheduler)
+    ExecutorPodsSnapshot.setShouldCheckAllContainers(false)
   }
 
   test("Subscribers get notified of events periodically.") {
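---

For reference, a minimal sketch of how a user might enable this behavior from application code. This is not part of the patch; the app name, master URL, and container image below are illustrative placeholders, and only the `spark.kubernetes.executor.checkAllContainers` key comes from this change:

```scala
import org.apache.spark.SparkConf

// A minimal sketch, assuming a reachable Kubernetes API server and a published
// Spark image; every value below except the checkAllContainers key is a placeholder.
val conf = new SparkConf()
  .setAppName("sidecar-failure-demo")                               // hypothetical app name
  .setMaster("k8s://https://example-apiserver:6443")                // placeholder API server URL
  .set("spark.kubernetes.container.image", "example/spark:3.1.0")   // placeholder image
  // The property added by this patch (default: false). When true, a terminated
  // container with a non-zero exit code marks the whole executor pod as failed.
  .set("spark.kubernetes.executor.checkAllContainers", "true")
```

As the snapshot logic above shows, the extra check only fires while the pod phase is "running", the pod's restart policy is "Never" (which Spark sets on the executor pods it creates), and at least one container has terminated with a non-zero exit code.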