[SPARK-1620] Handle uncaught exceptions in function run by Akka scheduler
If the intended behavior was that uncaught exceptions thrown in functions being run by the Akka scheduler would end up being handled by the default uncaught exception handler set in Executor, and if that behavior is, in fact, correct, then this is a way to accomplish that. I'm not certain, though, that we shouldn't be doing something different to handle uncaught exceptions from some of these scheduled functions. In any event, this PR covers all of the cases I comment on in [SPARK-1620](https://issues.apache.org/jira/browse/SPARK-1620).

Author: Mark Hamstra <markhamstra@gmail.com>

Closes #622 from markhamstra/SPARK-1620 and squashes the following commits:

071d193 [Mark Hamstra] refactored post-SPARK-1772
1a6a35e [Mark Hamstra] another style fix
d30eb94 [Mark Hamstra] scalastyle
3573ecd [Mark Hamstra] Use wrapped try/catch in Utils.tryOrExit
8fc0439 [Mark Hamstra] Make functions run by the Akka scheduler use Executor's UncaughtExceptionHandler
This commit is contained in: parent d58cb33ffa, commit 17f3075bc4
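To make the mechanics concrete, here is a minimal, self-contained sketch of the failure mode and the fix. It is not part of the commit: it uses java.util.concurrent's ScheduledExecutorService as a stand-in for the Akka scheduler (the plumbing differs, but in both cases an exception thrown inside a scheduled task never reaches the Executor's handler on its own), and a simplified tryOrExit that forwards to the thread's default uncaught exception handler rather than to Spark's ExecutorUncaughtExceptionHandler.

import java.util.concurrent.{Executors, TimeUnit}

object SchedulerExceptionSketch {

  // Simplified stand-in for the Utils.tryOrExit added by this commit:
  // run the block, and hand any throwable to the default uncaught
  // exception handler instead of letting the scheduler swallow it.
  def tryOrExit(block: => Unit): Unit = {
    try block
    catch {
      case t: Throwable =>
        Option(Thread.getDefaultUncaughtExceptionHandler) match {
          case Some(h) => h.uncaughtException(Thread.currentThread(), t)
          case None    => t.printStackTrace()
        }
    }
  }

  def main(args: Array[String]): Unit = {
    // SAM conversion to Thread.UncaughtExceptionHandler (Scala 2.12+).
    Thread.setDefaultUncaughtExceptionHandler(
      (t: Thread, e: Throwable) => println(s"default handler saw: ${e.getMessage}"))

    val scheduler = Executors.newSingleThreadScheduledExecutor()

    // Swallowed: the exception is captured in the discarded ScheduledFuture
    // and nothing is ever printed for it.
    scheduler.schedule(new Runnable {
      override def run(): Unit = throw new RuntimeException("silently lost")
    }, 10, TimeUnit.MILLISECONDS)

    // Forwarded: tryOrExit routes the exception to the default handler.
    scheduler.schedule(new Runnable {
      override def run(): Unit = tryOrExit { throw new RuntimeException("forwarded") }
    }, 50, TimeUnit.MILLISECONDS)

    Thread.sleep(300)
    scheduler.shutdown()
  }
}

Running this prints only "default handler saw: forwarded"; the unwrapped task's exception vanishes, which is exactly the hazard for a retry timer or heartbeat that silently stops working.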
@@ -30,7 +30,7 @@ import org.apache.spark.{Logging, SparkConf, SparkException}
 import org.apache.spark.deploy.{ApplicationDescription, ExecutorState}
 import org.apache.spark.deploy.DeployMessages._
 import org.apache.spark.deploy.master.Master
-import org.apache.spark.util.AkkaUtils
+import org.apache.spark.util.{Utils, AkkaUtils}
 
 /**
  * Interface allowing applications to speak with a Spark deploy cluster. Takes a master URL,
@@ -88,6 +88,7 @@ private[spark] class AppClient(
       var retries = 0
       registrationRetryTimer = Some {
         context.system.scheduler.schedule(REGISTRATION_TIMEOUT, REGISTRATION_TIMEOUT) {
+          Utils.tryOrExit {
           retries += 1
           if (registered) {
             registrationRetryTimer.foreach(_.cancel())
@@ -99,6 +100,7 @@ private[spark] class AppClient(
           }
           }
         }
+        }
 
   def changeMaster(url: String) {
     activeMasterUrl = url
@@ -166,6 +166,7 @@ private[spark] class Worker(
     var retries = 0
     registrationRetryTimer = Some {
       context.system.scheduler.schedule(REGISTRATION_TIMEOUT, REGISTRATION_TIMEOUT) {
+        Utils.tryOrExit {
         retries += 1
         if (registered) {
           registrationRetryTimer.foreach(_.cancel())
@@ -178,6 +179,7 @@ private[spark] class Worker(
         }
         }
       }
+      }
 
   override def receive = {
     case RegisteredWorker(masterUrl, masterWebUiUrl) =>
@@ -31,6 +31,7 @@ import scala.util.Random
 import org.apache.spark._
 import org.apache.spark.TaskState.TaskState
 import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
+import org.apache.spark.util.Utils
 
 /**
  * Schedules tasks for multiple types of clusters by acting through a SchedulerBackend.
@@ -139,7 +140,7 @@ private[spark] class TaskSchedulerImpl(
       import sc.env.actorSystem.dispatcher
       sc.env.actorSystem.scheduler.schedule(SPECULATION_INTERVAL milliseconds,
             SPECULATION_INTERVAL milliseconds) {
-        checkSpeculatableTasks()
+        Utils.tryOrExit { checkSpeculatableTasks() }
       }
     }
   }
@@ -155,7 +155,7 @@ private[spark] class BlockManager(
     BlockManagerWorker.startBlockManagerWorker(this)
     if (!BlockManager.getDisableHeartBeatsForTesting(conf)) {
       heartBeatTask = actorSystem.scheduler.schedule(0.seconds, heartBeatFrequency.milliseconds) {
-        heartBeat()
+        Utils.tryOrExit { heartBeat() }
       }
     }
   }
@@ -40,6 +40,7 @@ import tachyon.client.{TachyonFile,TachyonFS}
 
 import org.apache.spark.{Logging, SecurityManager, SparkConf, SparkException}
 import org.apache.spark.deploy.SparkHadoopUtil
+import org.apache.spark.executor.ExecutorUncaughtExceptionHandler
 import org.apache.spark.serializer.{DeserializationStream, SerializationStream, SerializerInstance}
 
 /**
@@ -780,6 +781,18 @@ private[spark] object Utils extends Logging {
     output.toString
   }
 
+  /**
+   * Execute a block of code that evaluates to Unit, forwarding any uncaught exceptions to the
+   * default UncaughtExceptionHandler
+   */
+  def tryOrExit(block: => Unit) {
+    try {
+      block
+    } catch {
+      case t: Throwable => ExecutorUncaughtExceptionHandler.uncaughtException(t)
+    }
+  }
+
   /**
    * A regular expression to match classes of the "core" Spark API that we want to skip when
    * finding the call site of a method.
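One caveat worth keeping in mind (context, not shown in this diff): the name tryOrExit is apt, because the handler it forwards to does not merely log. Spark's ExecutorUncaughtExceptionHandler terminates the JVM after logging. A simplified, hypothetical stand-in, for illustration only:

// Hypothetical, simplified sketch of a process-killing uncaught exception
// handler; the real ExecutorUncaughtExceptionHandler also special-cases
// OutOfMemoryError and JVM shutdown, and uses dedicated executor exit codes.
object FatalHandlerSketch extends Thread.UncaughtExceptionHandler {
  override def uncaughtException(thread: Thread, exception: Throwable): Unit = {
    System.err.println(s"Uncaught exception in thread ${thread.getName}")
    exception.printStackTrace()
    System.exit(1)  // placeholder exit code, not Spark's actual constant
  }

  // Convenience overload mirroring the single-argument call used by tryOrExit.
  def uncaughtException(exception: Throwable): Unit =
    uncaughtException(Thread.currentThread(), exception)
}

Arguably that is the right trade-off for these scheduler-driven loops: a registration retry timer, speculation check, or heartbeat that has started throwing is unlikely to recover, and exiting lets the cluster manager restart the process cleanly.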