diff --git a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
index c82a4bdabf..76d01f8384 100644
--- a/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
+++ b/core/src/main/scala/org/apache/spark/executor/CoarseGrainedExecutorBackend.scala
@@ -202,11 +202,17 @@ private[spark] class CoarseGrainedExecutorBackend(
       stopping.set(true)
       new Thread("CoarseGrainedExecutorBackend-stop-executor") {
         override def run(): Unit = {
-          // executor.stop() will call `SparkEnv.stop()` which waits until RpcEnv stops totally.
-          // However, if `executor.stop()` runs in some thread of RpcEnv, RpcEnv won't be able to
-          // stop until `executor.stop()` returns, which becomes a dead-lock (See SPARK-14180).
-          // Therefore, we put this line in a new thread.
-          executor.stop()
+          // `executor` can be null if there's any error in `CoarseGrainedExecutorBackend.onStart`
+          // or if creating the `Executor` fails.
+          if (executor == null) {
+            System.exit(1)
+          } else {
+            // executor.stop() will call `SparkEnv.stop()` which waits until RpcEnv stops totally.
+            // However, if `executor.stop()` runs in some thread of RpcEnv, RpcEnv won't be able to
+            // stop until `executor.stop()` returns, which becomes a dead-lock (See SPARK-14180).
+            // Therefore, we put this line in a new thread.
+            executor.stop()
+          }
         }
       }.start()
 
@@ -286,8 +292,7 @@ private[spark] class CoarseGrainedExecutorBackend(
       if (notifyDriver && driver.nonEmpty) {
         driver.get.send(RemoveExecutor(executorId, new ExecutorLossReason(reason)))
       }
-
-      System.exit(code)
+      self.send(Shutdown)
     } else {
       logInfo("Skip exiting executor since it's been already asked to exit before.")
     }
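
For context, here is a minimal, self-contained Scala sketch of the deadlock pattern the first hunk's comment describes (SPARK-14180). This is not Spark code; the names `ShutdownSketch`, `rpcThread`, and `stopEverything` are hypothetical stand-ins for RpcEnv's dispatcher and the blocking stop path. The point is that a stop routine which waits for the messaging layer to terminate must not run on one of that layer's own threads, which is why the handler spawns a dedicated thread.

import java.util.concurrent.{Executors, TimeUnit}

// Hypothetical sketch, not Spark code: illustrates why the Shutdown handler
// performs the blocking stop on a freshly started thread.
object ShutdownSketch {
  // Stands in for RpcEnv's dispatcher: a single-threaded message loop.
  private val rpcThread = Executors.newSingleThreadExecutor()

  // Stands in for executor.stop() / SparkEnv.stop(): blocks until the
  // message loop has fully terminated.
  private def stopEverything(): Unit = {
    rpcThread.shutdown()
    // Deadlock trap: if this runs *on* rpcThread, the loop can never
    // terminate while it is still executing the current task, so this
    // wait only returns once the timeout expires.
    rpcThread.awaitTermination(10, TimeUnit.SECONDS)
  }

  def main(args: Array[String]): Unit = {
    // What the patched handler does: receive the message on the RPC
    // thread, but run the blocking stop on a dedicated thread.
    rpcThread.submit(new Runnable {
      override def run(): Unit = {
        new Thread("stop-executor") {
          override def run(): Unit = stopEverything() // safe: not an RPC thread
        }.start()
      }
    })
  }
}

The second hunk applies the same reasoning: rather than calling System.exit(code) directly from exitExecutor, which may be running on an RPC thread, it sends Shutdown to itself so the exit is routed through the handler above and the executor is stopped via the safe, dedicated-thread path.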