From b10199413a1e9193c605dc17de276c78b578e545 Mon Sep 17 00:00:00 2001
From: Patrick Wendell
Date: Thu, 1 Aug 2013 13:37:22 -0700
Subject: [PATCH] Slight refactoring to SparkContext functions

---
 core/src/main/scala/spark/SparkContext.scala      | 13 ++++++++-----
 core/src/main/scala/spark/ui/jobs/PoolTable.scala |  4 ++--
 2 files changed, 10 insertions(+), 7 deletions(-)

diff --git a/core/src/main/scala/spark/SparkContext.scala b/core/src/main/scala/spark/SparkContext.scala
index 2e2a699708..97e1aaf49e 100644
--- a/core/src/main/scala/spark/SparkContext.scala
+++ b/core/src/main/scala/spark/SparkContext.scala
@@ -581,10 +581,17 @@ class SparkContext(
    * Return pools for fair scheduler
    * TODO(xiajunluan): We should take nested pools into account
    */
-  def getPools: ArrayBuffer[Schedulable] = {
+  def getAllPools: ArrayBuffer[Schedulable] = {
     taskScheduler.rootPool.schedulableQueue
   }
 
+  /**
+   * Return the pool associated with the given name, if one exists
+   */
+  def getPoolForName(pool: String): Option[Schedulable] = {
+    taskScheduler.rootPool.schedulableNameToSchedulable.get(pool)
+  }
+
   /**
    * Return current scheduling mode
    */
@@ -592,10 +599,6 @@ class SparkContext(
     taskScheduler.schedulingMode
   }
 
-  def getPoolNameToPool: HashMap[String, Schedulable] = {
-    taskScheduler.rootPool.schedulableNameToSchedulable
-  }
-
   /**
    * Clear the job's list of files added by `addFile` so that they do not get downloaded to
    * any new nodes.
diff --git a/core/src/main/scala/spark/ui/jobs/PoolTable.scala b/core/src/main/scala/spark/ui/jobs/PoolTable.scala
index 8788ed8bc1..29061199df 100644
--- a/core/src/main/scala/spark/ui/jobs/PoolTable.scala
+++ b/core/src/main/scala/spark/ui/jobs/PoolTable.scala
@@ -38,7 +38,7 @@ private[spark] class FIFOSource() extends PoolSource {
  */
 private[spark] class FairSource(sc: SparkContext) extends PoolSource {
   def getPools: Seq[Schedulable] = {
-    sc.getPools.toSeq
+    sc.getAllPools.toSeq
   }
 }
 
@@ -48,7 +48,7 @@ private[spark] class FairSource(sc: SparkContext) extends PoolSource {
 private[spark] class PoolDetailSource(sc: SparkContext, poolName: String) extends PoolSource {
   def getPools: Seq[Schedulable] = {
     val pools = HashSet[Schedulable]()
-    pools += sc.getPoolNameToPool(poolName)
+    pools += sc.getPoolForName(poolName).get
    pools.toSeq
   }
 }
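
Usage note (reviewer sketch, not part of the patch): the rename plus the new
Option-returning lookup change how callers probe the fair-scheduler pools.
Below is a minimal sketch of the resulting API, assuming a local SparkContext
with the fair scheduler enabled and a hypothetical pool named "production";
neither the object name nor the pool name comes from this patch.

  import spark.SparkContext

  object PoolLookupSketch {
    def main(args: Array[String]) {
      val sc = new SparkContext("local", "pool-lookup-sketch")

      // Enumerate the top-level pools (nested pools are still a TODO,
      // per the comment on getAllPools).
      sc.getAllPools.foreach(pool => println(pool.name))

      // Look up one pool by name. Returning Option[Schedulable] makes
      // the missing-pool case explicit, where applying the removed
      // getPoolNameToPool map would have thrown on an unknown key.
      sc.getPoolForName("production") match {
        case Some(pool) => println("Found pool: " + pool.name)
        case None       => println("No pool named production")
      }

      sc.stop()
    }
  }

Note that PoolDetailSource calls sc.getPoolForName(poolName).get, so it still
throws if the named pool is absent; the Option only helps callers that choose
to handle None.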