[SPARK-34407][K8S] KubernetesClusterSchedulerBackend.stop should clean up K8s resources

### What changes were proposed in this pull request?

This PR aims to fix `KubernetesClusterSchedulerBackend.stop` to wrap `super.stop` with `Utils.tryLogNonFatalError`.

### Why are the changes needed?

[CoarseGrainedSchedulerBackend.stop](https://github.com/apache/spark/blob/master/core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala#L559) may throw `SparkException` and this causes K8s resource (pod and configmap) leakage.

### Does this PR introduce _any_ user-facing change?

No. This is a bug fix.

### How was this patch tested?

Pass the CI with the newly added test case.

Closes #31533 from dongjoon-hyun/SPARK-34407.

Authored-by: Dongjoon Hyun <dhyun@apple.com>
Signed-off-by: Dongjoon Hyun <dhyun@apple.com>
This commit is contained in:
Dongjoon Hyun 2021-02-08 21:47:23 -08:00
parent a1e75edc39
commit ea339c38b4
2 changed files with 16 additions and 2 deletions

View file

@@ -101,7 +101,11 @@ private[spark] class KubernetesClusterSchedulerBackend(
     }
   }

   override def stop(): Unit = {
     // When `CoarseGrainedSchedulerBackend.stop` throws `SparkException`,
     // K8s cluster scheduler should log and proceed in order to delete the K8s cluster resources.
     Utils.tryLogNonFatalError {
       super.stop()
     }
     Utils.tryLogNonFatalError {
       snapshotsStore.stop()
View file

@@ -34,7 +34,7 @@ import org.apache.spark.deploy.k8s.Fabric8Aliases._
 import org.apache.spark.resource.{ResourceProfile, ResourceProfileManager}
 import org.apache.spark.rpc.{RpcEndpoint, RpcEndpointRef, RpcEnv}
 import org.apache.spark.scheduler.{ExecutorKilled, LiveListenerBus, TaskSchedulerImpl}
 import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.{RemoveExecutor, StopDriver}
 import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
 import org.apache.spark.scheduler.cluster.k8s.ExecutorLifecycleTestUtils.TEST_SPARK_APP_ID
@@ -189,4 +189,14 @@ class KubernetesClusterSchedulerBackendSuite extends SparkFunSuite with BeforeAn
       TimeUnit.MILLISECONDS)
     verify(labeledPods).delete()
   }

   test("SPARK-34407: CoarseGrainedSchedulerBackend.stop may throw SparkException") {
     schedulerBackendUnderTest.start()
     when(driverEndpointRef.askSync[Boolean](StopDriver)).thenThrow(new RuntimeException)
     schedulerBackendUnderTest.stop()
     // Verify the last operation of `schedulerBackendUnderTest.stop`.
     verify(kubernetesClient).close()
   }
 }