cleanup a bunch of imports

Imran Rashid 2013-02-10 21:41:40 -08:00
parent 383af599bb
commit d9461b15d3
8 changed files with 22 additions and 35 deletions
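For readers skimming the hunks below: every file touched here lives inside the old `spark` package tree, where Scala resolves an import like `import storage.Foo` relative to the enclosing package. The commit rewrites those package-relative imports into fully qualified `spark.storage.Foo` form. A minimal sketch (hypothetical `demo`/`client` packages, not Spark code) of why both styles compile and why the qualified one is safer:

// Hypothetical packages, illustrating the two import styles in this commit.
package demo {
  package storage {
    class Tracker
  }

  // Pre-commit style: inside `package demo`, the subpackage `storage` is
  // already in scope, so this relative import means demo.storage.Tracker.
  // It can silently rebind if a closer `storage` name ever appears.
  import storage.Tracker

  object Before { val t = new Tracker }
}

package client {
  // Post-commit style: the full path reads the same from any file and
  // cannot be captured by a nearer name.
  import demo.storage.Tracker

  object After { val t = new Tracker }
}

Fully qualified imports also survive a file being moved between packages without changing meaning.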

core/src/main/scala/spark/BlockStoreShuffleFetcher.scala

@@ -3,8 +3,8 @@ package spark
 import scala.collection.mutable.ArrayBuffer
 import scala.collection.mutable.HashMap
 
-import storage.{DelegateBlockFetchTracker, BlockManagerId}
-import util.TimedIterator
+import spark.storage.{DelegateBlockFetchTracker, BlockManagerId}
+import spark.util.TimedIterator
 
 private[spark] class BlockStoreShuffleFetcher extends ShuffleFetcher with Logging {
   override def fetch[K, V](shuffleId: Int, reduceId: Int) = {

core/src/main/scala/spark/ShuffleFetcher.scala

@@ -1,7 +1,7 @@
 package spark
 
-import storage.BlockFetchTracker
-import util.TimedIterator
+import spark.storage.BlockFetchTracker
+import spark.util.TimedIterator
 
 private[spark] abstract class ShuffleFetcher {
   /**

core/src/main/scala/spark/SparkContext.scala

@@ -1,19 +1,15 @@
 package spark
 
 import java.io._
-import java.util.concurrent.ConcurrentHashMap
 import java.util.concurrent.atomic.AtomicInteger
-import java.net.{URI, URLClassLoader}
-import java.lang.ref.WeakReference
+import java.net.URI
 
 import scala.collection.Map
 import scala.collection.generic.Growable
-import scala.collection.mutable.{ArrayBuffer, HashMap}
+import scala.collection.mutable.HashMap
 import scala.collection.JavaConversions._
 
-import akka.actor.Actor
-import akka.actor.Actor._
-import org.apache.hadoop.fs.{FileUtil, Path}
+import org.apache.hadoop.fs.Path
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.mapred.InputFormat
 import org.apache.hadoop.mapred.SequenceFileInputFormat
@@ -33,23 +29,19 @@ import org.apache.hadoop.mapred.TextInputFormat
 import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat}
 import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
 import org.apache.hadoop.mapreduce.{Job => NewHadoopJob}
-import org.apache.mesos.{Scheduler, MesosNativeLibrary}
+import org.apache.mesos.MesosNativeLibrary
 
 import spark.broadcast._
 import spark.deploy.LocalSparkCluster
 import spark.partial.ApproximateEvaluator
 import spark.partial.PartialResult
-import rdd.{CheckpointRDD, HadoopRDD, NewHadoopRDD, UnionRDD}
-import scheduler._
+import spark.rdd.{CheckpointRDD, HadoopRDD, NewHadoopRDD, UnionRDD}
+import spark.scheduler._
 import spark.scheduler.local.LocalScheduler
 import spark.scheduler.cluster.{SparkDeploySchedulerBackend, SchedulerBackend, ClusterScheduler}
 import spark.scheduler.mesos.{CoarseMesosSchedulerBackend, MesosSchedulerBackend}
-import storage.BlockManagerUI
-import storage.RDDInfo
-import storage.StorageStatus
-import util.{MetadataCleaner, TimeStampedHashMap}
-import storage.{StorageStatus, StorageUtils, RDDInfo}
-import scala.Some
+import spark.storage.BlockManagerUI
+import spark.util.{MetadataCleaner, TimeStampedHashMap}
+import spark.storage.{StorageStatus, StorageUtils, RDDInfo}
 
 /**
  * Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
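One detail worth flagging in the hunk above: not every dropped line was a relative import. `storage.StorageStatus` and `storage.RDDInfo` were imported twice (once individually and again in the brace group), and `import scala.Some` is always redundant. A minimal sketch (hypothetical object name) of why:

// Every Scala compilation unit implicitly begins with:
//   import java.lang._
//   import scala._
//   import scala.Predef._
// so Some, Option, List, etc. need no explicit import.
object ImplicitImportsDemo {
  val found: Option[Int] = Some(42)   // Some resolves to scala.Some
  val missing: Option[Int] = None
}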

core/src/main/scala/spark/TaskContext.scala

@@ -1,7 +1,7 @@
 package spark
 
 import scala.collection.mutable.ArrayBuffer
-import scheduler.Task
+import spark.scheduler.Task
 
 class TaskContext(val stageId: Int, val splitId: Int, val attemptId: Long, val task: Task[_]) extends Serializable {
   //by adding Task here, I'm destroying the separation between Task & TaskContext ... not sure why they need to
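The in-line comment above flags a design concern rather than an import problem: once TaskContext carries a reference to its Task, the two types depend on each other in both directions. A hypothetical sketch of that cycle (abbreviated, not the real Spark signatures):

// Task -> TaskContext (hypothetical accessor) and now
// TaskContext -> Task (the `task` field from the hunk above).
abstract class Task[T](val stageId: Int) {
  def run(attemptId: Long): T
  def context: TaskContext            // hypothetical, for illustration
}

class TaskContext(val stageId: Int, val splitId: Int,
                  val attemptId: Long, val task: Task[_])
  extends Serializable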

core/src/main/scala/spark/scheduler/DAGScheduler.scala

@@ -1,22 +1,19 @@
 package spark.scheduler
-import cluster.TaskInfo
 
 import java.net.URI
 import java.util.concurrent.atomic.AtomicInteger
 import java.util.concurrent.Future
 import java.util.concurrent.LinkedBlockingQueue
 import java.util.concurrent.TimeUnit
 
 import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Map}
 
 import spark._
-import executor.TaskMetrics
+import spark.executor.TaskMetrics
 import spark.partial.ApproximateActionListener
 import spark.partial.ApproximateEvaluator
 import spark.partial.PartialResult
 import spark.storage.BlockManagerMaster
 import spark.storage.BlockManagerId
-import util.{MetadataCleaner, TimeStampedHashMap}
+import spark.util.{MetadataCleaner, TimeStampedHashMap}
 
 /**
  * A Scheduler subclass that implements stage-oriented scheduling. It computes a DAG of stages for

core/src/main/scala/spark/scheduler/DAGSchedulerEvent.scala

@@ -1,10 +1,10 @@
 package spark.scheduler
-import cluster.TaskInfo
+import spark.scheduler.cluster.TaskInfo
 
 import scala.collection.mutable.Map
 
 import spark._
-import executor.TaskMetrics
+import spark.executor.TaskMetrics
 
 /**
  * Types of events that can be handled by the DAGScheduler. The DAGScheduler uses an event queue

core/src/main/scala/spark/scheduler/SparkListener.scala

@@ -1,7 +1,7 @@
 package spark.scheduler
 
-import cluster.TaskInfo
-import collection._
+import spark.scheduler.cluster.TaskInfo
+import scala.collection._
 import spark.util.Distribution
 import spark.executor.TaskMetrics

core/src/main/scala/spark/scheduler/local/LocalScheduler.scala

@@ -1,15 +1,13 @@
 package spark.scheduler.local
 
 import java.io.File
 import java.net.URLClassLoader
 import java.util.concurrent.Executors
 import java.util.concurrent.atomic.AtomicInteger
 
 import scala.collection.mutable.HashMap
 
 import spark._
-import executor.ExecutorURLClassLoader
+import spark.executor.ExecutorURLClassLoader
 import spark.scheduler._
-import cluster.TaskInfo
+import spark.scheduler.cluster.TaskInfo
 
 /**
  * A simple TaskScheduler implementation that runs tasks locally in a thread pool. Optionally