[SPARK-11334][CORE] Fix bug in Executor allocation manager in running tasks calculation

## What changes were proposed in this pull request?

With dynamic allocation turned on, we often see Spark jobs get stuck because the Executor Allocation Manager does not request any executors even though there are pending tasks. Looking at the running-task accounting logic in the Executor Allocation Manager, the calculation can go wrong and the number of running tasks can become negative.

## How was this patch tested?

Added a unit test.

Author: Sital Kedia <skedia@fb.com>

Closes #19580 from sitalkedia/skedia/fix_stuck_job.
This commit is contained in:
Sital Kedia 2017-10-31 09:49:58 -07:00 committed by Marcelo Vanzin
parent 4d9ebf3835
commit 7986cc09b1
2 changed files with 40 additions and 11 deletions

View file

@ -267,6 +267,10 @@ private[spark] class ExecutorAllocationManager(
(numRunningOrPendingTasks + tasksPerExecutor - 1) / tasksPerExecutor
}
/**
 * The total number of tasks currently running across all active stages,
 * as tracked by the internal listener. Synchronized on the allocation
 * manager because listener callbacks update the counts concurrently.
 */
private def totalRunningTasks(): Int = synchronized {
listener.totalRunningTasks
}
/**
* This is called at a fixed interval to regulate the number of pending executor requests
* and number of executors running.
@ -602,12 +606,11 @@ private[spark] class ExecutorAllocationManager(
private class ExecutorAllocationListener extends SparkListener {
private val stageIdToNumTasks = new mutable.HashMap[Int, Int]
// Number of running tasks per stage including speculative tasks.
// Should be 0 when no stages are active.
private val stageIdToNumRunningTask = new mutable.HashMap[Int, Int]
private val stageIdToTaskIndices = new mutable.HashMap[Int, mutable.HashSet[Int]]
private val executorIdToTaskIds = new mutable.HashMap[String, mutable.HashSet[Long]]
// Number of tasks currently running on the cluster including speculative tasks.
// Should be 0 when no stages are active.
private var numRunningTasks: Int = _
// Number of speculative tasks to be scheduled in each stage
private val stageIdToNumSpeculativeTasks = new mutable.HashMap[Int, Int]
// The speculative tasks started in each stage
@ -625,6 +628,7 @@ private[spark] class ExecutorAllocationManager(
val numTasks = stageSubmitted.stageInfo.numTasks
allocationManager.synchronized {
stageIdToNumTasks(stageId) = numTasks
stageIdToNumRunningTask(stageId) = 0
allocationManager.onSchedulerBacklogged()
// Compute the number of tasks requested by the stage on each host
@ -651,6 +655,7 @@ private[spark] class ExecutorAllocationManager(
val stageId = stageCompleted.stageInfo.stageId
allocationManager.synchronized {
stageIdToNumTasks -= stageId
stageIdToNumRunningTask -= stageId
stageIdToNumSpeculativeTasks -= stageId
stageIdToTaskIndices -= stageId
stageIdToSpeculativeTaskIndices -= stageId
@ -663,10 +668,6 @@ private[spark] class ExecutorAllocationManager(
// This is needed in case the stage is aborted for any reason
if (stageIdToNumTasks.isEmpty && stageIdToNumSpeculativeTasks.isEmpty) {
allocationManager.onSchedulerQueueEmpty()
if (numRunningTasks != 0) {
logWarning("No stages are running, but numRunningTasks != 0")
numRunningTasks = 0
}
}
}
}
@ -678,7 +679,9 @@ private[spark] class ExecutorAllocationManager(
val executorId = taskStart.taskInfo.executorId
allocationManager.synchronized {
numRunningTasks += 1
if (stageIdToNumRunningTask.contains(stageId)) {
stageIdToNumRunningTask(stageId) += 1
}
// This guards against the race condition in which the `SparkListenerTaskStart`
// event is posted before the `SparkListenerBlockManagerAdded` event, which is
// possible because these events are posted in different threads. (see SPARK-4951)
@ -709,7 +712,9 @@ private[spark] class ExecutorAllocationManager(
val taskIndex = taskEnd.taskInfo.index
val stageId = taskEnd.stageId
allocationManager.synchronized {
numRunningTasks -= 1
if (stageIdToNumRunningTask.contains(stageId)) {
stageIdToNumRunningTask(stageId) -= 1
}
// If the executor is no longer running any scheduled tasks, mark it as idle
if (executorIdToTaskIds.contains(executorId)) {
executorIdToTaskIds(executorId) -= taskId
@ -787,7 +792,9 @@ private[spark] class ExecutorAllocationManager(
/**
 * Number of tasks currently running, summed over every active stage
 * (each per-stage count includes speculative tasks).
 */
def totalRunningTasks(): Int =
  stageIdToNumRunningTask.valuesIterator.sum
/**
* Return true if an executor is not currently running a task, and false otherwise.

View file

@ -227,6 +227,23 @@ class ExecutorAllocationManagerSuite
assert(numExecutorsToAdd(manager) === 1)
}
test("ignore task end events from completed stages") {
  sc = createSparkContext(0, 10, 0)
  val manager = sc.executorAllocationManager.get
  val bus = sc.listenerBus
  val stage = createStageInfo(0, 5)
  post(bus, SparkListenerStageSubmitted(stage))

  val firstTask = createTaskInfo(0, 0, "executor-1")
  val secondTask = createTaskInfo(1, 1, "executor-1")
  post(bus, SparkListenerTaskStart(0, 0, firstTask))
  post(bus, SparkListenerTaskStart(0, 0, secondTask))

  // Complete the stage before the task-end events arrive; the second end
  // event even carries a stage id that was never submitted.
  post(bus, SparkListenerStageCompleted(stage))
  post(bus, SparkListenerTaskEnd(0, 0, null, Success, firstTask, null))
  post(bus, SparkListenerTaskEnd(2, 0, null, Success, secondTask, null))

  // Late/unknown task-end events must not drive the count negative.
  assert(totalRunningTasks(manager) === 0)
}
test("cancel pending executors when no longer needed") {
sc = createSparkContext(0, 10, 0)
val manager = sc.executorAllocationManager.get
@ -1107,6 +1124,7 @@ private object ExecutorAllocationManagerSuite extends PrivateMethodTester {
private val _localityAwareTasks = PrivateMethod[Int]('localityAwareTasks)
private val _hostToLocalTaskCount = PrivateMethod[Map[String, Int]]('hostToLocalTaskCount)
private val _onSpeculativeTaskSubmitted = PrivateMethod[Unit]('onSpeculativeTaskSubmitted)
private val _totalRunningTasks = PrivateMethod[Int]('totalRunningTasks)
private def numExecutorsToAdd(manager: ExecutorAllocationManager): Int = {
manager invokePrivate _numExecutorsToAdd()
@ -1190,6 +1208,10 @@ private object ExecutorAllocationManagerSuite extends PrivateMethodTester {
manager invokePrivate _localityAwareTasks()
}
// Reads the manager's private `totalRunningTasks` counter via reflection.
private def totalRunningTasks(manager: ExecutorAllocationManager): Int =
  manager.invokePrivate(_totalRunningTasks())
// Reads the manager's private `hostToLocalTaskCount` map via reflection.
private def hostToLocalTaskCount(manager: ExecutorAllocationManager): Map[String, Int] =
  manager.invokePrivate(_hostToLocalTaskCount())