[SPARK-13583][CORE][STREAMING] Remove unused imports and add checkstyle rule

## What changes were proposed in this pull request?

After SPARK-6990, `dev/lint-java` keeps the Java code healthy and helps PR review by saving a lot of time.
This issue aims to remove unused imports from the Java/Scala code and to add an `UnusedImports` checkstyle rule to help developers.
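
As a hedged illustration of what the new rule catches (the class and imports below are hypothetical, not taken from this patch), checkstyle's `UnusedImports` flags any import that is never referenced in the file body:

```java
import java.util.ArrayList;
import java.util.List;   // never referenced below, so UnusedImports reports it

// Hypothetical example class, only for illustrating the rule.
public class UnusedImportExample {
  public static void main(String[] args) {
    // Only ArrayList is used; the java.util.List import above is dead code.
    ArrayList<String> names = new ArrayList<>();
    names.add("spark");
    System.out.println(names);
  }
}
```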

## How was this patch tested?
```
./dev/lint-java
./build/sbt compile
```

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #11438 from dongjoon-hyun/SPARK-13583.
Authored by Dongjoon Hyun on 2016-03-03 10:12:32 +00:00, committed by Sean Owen
parent e97fc7f176
commit b5f02d6743
226 changed files with 103 additions and 337 deletions


@ -166,5 +166,6 @@
<property name="exceptionVariableName" value="expected"/>
</module>
<module name="CommentsIndentation"/>
<module name="UnusedImports"/>
</module>
</module>


@ -19,7 +19,6 @@ package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;


@ -19,7 +19,6 @@ package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;


@ -19,7 +19,6 @@ package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import io.netty.buffer.Unpooled;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;


@ -20,9 +20,6 @@ package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;
/**
* Message indicating an error when transferring a stream.
*/


@ -20,9 +20,6 @@ package org.apache.spark.network.protocol;
import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;
/**
* Request to stream data from the remote end.
* <p>


@ -21,7 +21,6 @@ import com.google.common.base.Objects;
import io.netty.buffer.ByteBuf;
import org.apache.spark.network.buffer.ManagedBuffer;
import org.apache.spark.network.buffer.NettyManagedBuffer;
/**
* Response to {@link StreamRequest} when the stream has been successfully opened.


@ -33,7 +33,6 @@ import io.netty.channel.ChannelPromise;
import io.netty.channel.FileRegion;
import io.netty.handler.codec.MessageToMessageDecoder;
import io.netty.util.AbstractReferenceCounted;
import io.netty.util.ReferenceCountUtil;
import org.apache.spark.network.util.ByteArrayWritableChannel;
import org.apache.spark.network.util.NettyUtils;


@ -19,7 +19,6 @@ package org.apache.spark.network.server;
import java.nio.ByteBuffer;
import com.google.common.base.Preconditions;
import com.google.common.base.Throwables;
import io.netty.channel.Channel;
import io.netty.channel.ChannelFuture;


@ -31,8 +31,6 @@ import io.netty.channel.epoll.EpollSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.channel.socket.nio.NioSocketChannel;
import io.netty.handler.codec.ByteToMessageDecoder;
import io.netty.handler.codec.LengthFieldBasedFrameDecoder;
import io.netty.util.internal.PlatformDependent;
/**


@ -17,7 +17,6 @@
package org.apache.spark.network.util;
import java.util.Iterator;
import java.util.LinkedList;
import com.google.common.base.Preconditions;


@ -24,7 +24,6 @@ import java.util.concurrent.ConcurrentHashMap;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.spark.network.sasl.SecretKeyHolder;
import org.apache.spark.network.util.JavaUtils;
/**


@ -30,7 +30,6 @@ import org.apache.spark.network.TestUtils;
import org.apache.spark.network.TransportContext;
import org.apache.spark.network.sasl.SaslServerBootstrap;
import org.apache.spark.network.sasl.SecretKeyHolder;
import org.apache.spark.network.server.RpcHandler;
import org.apache.spark.network.server.TransportServer;
import org.apache.spark.network.server.TransportServerBootstrap;
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo;


@ -17,8 +17,6 @@
package org.apache.spark.shuffle.sort;
import org.apache.spark.memory.TaskMemoryManager;
/**
* Wrapper around an 8-byte word that holds a 24-bit partition number and 40-bit record pointer.
* <p>
@ -28,7 +26,7 @@ import org.apache.spark.memory.TaskMemoryManager;
* </pre>
* This implies that the maximum addressable page size is 2^27 bits = 128 megabytes, assuming that
* our offsets in pages are not 8-byte-word-aligned. Since we have 2^13 pages (based off the
* 13-bit page numbers assigned by {@link TaskMemoryManager}), this
* 13-bit page numbers assigned by {@link org.apache.spark.memory.TaskMemoryManager}), this
* implies that we can address 2^13 * 128 megabytes = 1 terabyte of RAM per task.
* <p>
* Assuming word-alignment would allow for a 1 gigabyte maximum page size, but we leave this
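
The hunk above shows a pattern that recurs throughout the patch: when a class was imported only so a Javadoc `{@link}` tag could refer to it, the import is removed and the link is written fully qualified, so the `UnusedImports` rule stays quiet and the generated docs still resolve. A minimal sketch under that assumption (the class name is illustrative, not a real Spark class):

```java
package org.apache.spark.shuffle.sort;

// Before this patch the file imported org.apache.spark.memory.TaskMemoryManager
// solely for the {@link} reference below; after the patch the import is gone
// and the Javadoc reference is fully qualified instead.

/**
 * See {@link org.apache.spark.memory.TaskMemoryManager} for a description of
 * how record addresses are encoded.
 */
final class JavadocLinkExample {
  // No code here needs TaskMemoryManager at compile time.
}
```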


@ -25,7 +25,6 @@ import java.util.Iterator;
import scala.Option;
import scala.Product2;
import scala.collection.JavaConverters;
import scala.collection.immutable.Map;
import scala.reflect.ClassTag;
import scala.reflect.ClassTag$;


@ -20,7 +20,6 @@ package org.apache.spark.util.collection.unsafe.sort;
import com.google.common.primitives.UnsignedLongs;
import org.apache.spark.annotation.Private;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.types.ByteArray;
import org.apache.spark.unsafe.types.UTF8String;
import org.apache.spark.util.Utils;


@ -17,11 +17,9 @@
package org.apache.spark.util.collection.unsafe.sort;
import org.apache.spark.memory.TaskMemoryManager;
final class RecordPointerAndKeyPrefix {
/**
* A pointer to a record; see {@link TaskMemoryManager} for a
* A pointer to a record; see {@link org.apache.spark.memory.TaskMemoryManager} for a
* description of how these addresses are encoded.
*/
public long recordPointer;


@ -26,7 +26,7 @@ import com.codahale.metrics.{Gauge, MetricRegistry}
import org.apache.spark.metrics.source.Source
import org.apache.spark.scheduler._
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils, Utils}
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils}
/**
* An agent that dynamically allocates and removes executors based on the workload.


@ -22,7 +22,6 @@ import java.util.concurrent.{ScheduledFuture, TimeUnit}
import scala.collection.mutable
import scala.concurrent.Future
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.rpc.{RpcCallContext, RpcEnv, ThreadSafeRpcEndpoint}
import org.apache.spark.scheduler._
import org.apache.spark.storage.BlockManagerId


@ -21,13 +21,13 @@ import java.io.{IOException, ObjectInputStream, ObjectOutputStream}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.reflect.{classTag, ClassTag}
import scala.reflect.ClassTag
import scala.util.hashing.byteswap32
import org.apache.spark.rdd.{PartitionPruningRDD, RDD}
import org.apache.spark.serializer.JavaSerializer
import org.apache.spark.util.{CollectionsUtils, Utils}
import org.apache.spark.util.random.{SamplingUtils, XORShiftRandom}
import org.apache.spark.util.random.SamplingUtils
/**
* An object that defines how the elements in a key-value pair RDD are partitioned by key.


@ -51,7 +51,6 @@ import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat,
WholeTextFileInputFormat}
import org.apache.spark.io.CompressionCodec
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
import org.apache.spark.rdd._
import org.apache.spark.rpc.RpcEndpointRef


@ -17,8 +17,6 @@
package org.apache.spark
import org.apache.spark.annotation.DeveloperApi
/**
* Exception thrown when a task cannot be serialized.
*/


@ -17,8 +17,6 @@
package org.apache.spark.api.java
import java.util.Comparator
import scala.language.implicitConversions
import scala.reflect.ClassTag
@ -191,7 +189,6 @@ class JavaRDD[T](val rdd: RDD[T])(implicit val classTag: ClassTag[T])
* Return this RDD sorted by the given key function.
*/
def sortBy[S](f: JFunction[T, S], ascending: Boolean, numPartitions: Int): JavaRDD[T] = {
import scala.collection.JavaConverters._
def fn: (T) => S = (x: T) => f.call(x)
import com.google.common.collect.Ordering // shadows scala.math.Ordering
implicit val ordering = Ordering.natural().asInstanceOf[Ordering[S]]


@ -21,7 +21,6 @@ import scala.reflect.ClassTag
import org.apache.spark.SecurityManager
import org.apache.spark.SparkConf
import org.apache.spark.annotation.DeveloperApi
/**
* An interface for all the broadcast implementations in Spark (to allow


@ -26,7 +26,7 @@ import scala.collection.JavaConverters._
import com.google.common.io.{ByteStreams, Files}
import org.apache.spark.{Logging, SparkException}
import org.apache.spark.Logging
import org.apache.spark.api.r.RUtils
import org.apache.spark.util.{RedirectThread, Utils}


@ -19,7 +19,6 @@ package org.apache.spark.deploy.master
import java.util.Date
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.deploy.DriverDescription
import org.apache.spark.util.Utils


@ -37,7 +37,7 @@ import org.apache.spark.deploy.master.{DriverState, Master}
import org.apache.spark.deploy.worker.ui.WorkerWebUI
import org.apache.spark.metrics.MetricsSystem
import org.apache.spark.rpc._
import org.apache.spark.util.{SignalLogger, ThreadUtils, Utils}
import org.apache.spark.util.{ThreadUtils, Utils}
private[deploy] class Worker(
override val rpcEnv: RpcEnv,


@ -18,7 +18,6 @@
package org.apache.spark.deploy.worker.ui
import java.io.File
import java.net.URI
import javax.servlet.http.HttpServletRequest
import scala.xml.Node


@ -18,7 +18,6 @@
package org.apache.spark.rdd
import org.apache.spark.{Logging, TaskContext}
import org.apache.spark.annotation.Experimental
import org.apache.spark.partial.BoundedDouble
import org.apache.spark.partial.MeanEvaluator
import org.apache.spark.partial.PartialResult


@ -17,7 +17,7 @@
package org.apache.spark.rdd
import java.sql.{Connection, PreparedStatement, ResultSet}
import java.sql.{Connection, ResultSet}
import scala.reflect.ClassTag


@ -19,7 +19,7 @@ package org.apache.spark.rdd
import scala.reflect.ClassTag
import org.apache.spark.{Partition, SparkContext, SparkEnv, SparkException, TaskContext}
import org.apache.spark.{Partition, SparkContext, SparkException, TaskContext}
import org.apache.spark.storage.RDDBlockId
/**


@ -22,7 +22,6 @@ import scala.reflect.ClassTag
import org.apache.hadoop.fs.Path
import org.apache.spark._
import org.apache.spark.util.SerializableConfiguration
/**
* An implementation of checkpointing that writes the RDD data to reliable storage.


@ -17,7 +17,6 @@
package org.apache.spark.rpc.netty
import java.io._
import java.lang.{Boolean => JBoolean}
import java.net.{InetSocketAddress, URI}
import java.nio.ByteBuffer
import java.nio.channels.{Pipe, ReadableByteChannel, WritableByteChannel}


@ -19,7 +19,6 @@ package org.apache.spark.scheduler
import java.util.Properties
import org.apache.spark.TaskContext
import org.apache.spark.util.CallSite
/**


@ -19,11 +19,9 @@ package org.apache.spark.scheduler
import java.util.Properties
import scala.collection.Map
import scala.language.existentials
import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.rdd.RDD
import org.apache.spark.util.CallSite


@ -17,7 +17,6 @@
package org.apache.spark.scheduler
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler.SchedulingMode.SchedulingMode
import org.apache.spark.storage.BlockManagerId


@ -22,7 +22,7 @@ import java.nio.ByteBuffer
import org.apache.spark.TaskState.TaskState
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler.ExecutorLossReason
import org.apache.spark.util.{SerializableBuffer, Utils}
import org.apache.spark.util.SerializableBuffer
private[spark] sealed trait CoarseGrainedClusterMessage extends Serializable


@ -23,9 +23,9 @@ import javax.annotation.concurrent.NotThreadSafe
import scala.reflect.ClassTag
import org.apache.spark.{SparkConf, SparkEnv}
import org.apache.spark.SparkEnv
import org.apache.spark.annotation.{DeveloperApi, Private}
import org.apache.spark.util.{ByteBufferInputStream, NextIterator, Utils}
import org.apache.spark.util.NextIterator
/**
* :: DeveloperApi ::


@ -17,8 +17,7 @@
package org.apache.spark.shuffle
import org.apache.spark.{Aggregator, Partitioner, ShuffleDependency}
import org.apache.spark.serializer.Serializer
import org.apache.spark.ShuffleDependency
/**
* A basic ShuffleHandle implementation that just captures registerShuffle's parameters.


@ -17,8 +17,6 @@
package org.apache.spark.shuffle
import java.nio.ByteBuffer
import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.storage.ShuffleBlockId


@ -20,7 +20,6 @@ package org.apache.spark.shuffle.hash
import java.io.IOException
import org.apache.spark._
import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.serializer.Serializer
import org.apache.spark.shuffle._


@ -18,7 +18,6 @@
package org.apache.spark.shuffle.sort
import org.apache.spark._
import org.apache.spark.executor.ShuffleWriteMetrics
import org.apache.spark.scheduler.MapStatus
import org.apache.spark.shuffle.{BaseShuffleHandle, IndexShuffleBlockResolver, ShuffleWriter}
import org.apache.spark.storage.ShuffleBlockId


@ -19,7 +19,7 @@ package org.apache.spark.storage
import scala.collection.Iterable
import scala.collection.generic.CanBuildFrom
import scala.concurrent.{Await, Future}
import scala.concurrent.Future
import org.apache.spark.{Logging, SparkConf, SparkException}
import org.apache.spark.rpc.RpcEndpointRef


@ -22,7 +22,6 @@ import java.nio.ByteBuffer
import java.nio.channels.FileChannel.MapMode
import org.apache.spark.Logging
import org.apache.spark.serializer.Serializer
import org.apache.spark.util.Utils
/**


@ -21,7 +21,7 @@ import java.net.{URI, URL}
import javax.servlet.DispatcherType
import javax.servlet.http.{HttpServlet, HttpServletRequest, HttpServletResponse}
import scala.collection.mutable.{ArrayBuffer, StringBuilder}
import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions
import scala.xml.Node


@ -19,7 +19,6 @@ package org.apache.spark.ui.exec
import javax.servlet.http.HttpServletRequest
import scala.util.Try
import scala.xml.{Node, Text}
import org.apache.spark.ui.{UIUtils, WebUIPage}


@ -20,7 +20,6 @@ package org.apache.spark.util
import java.lang.management.ManagementFactory
import java.lang.reflect.{Field, Modifier}
import java.util.{IdentityHashMap, Random}
import java.util.concurrent.ConcurrentHashMap
import scala.collection.mutable.ArrayBuffer
import scala.runtime.ScalaRunTime


@ -17,7 +17,7 @@
package org.apache.spark.util.collection
import java.util.{Arrays, Comparator}
import java.util.Comparator
import com.google.common.hash.Hashing


@ -17,9 +17,6 @@
package org.apache.spark.launcher;
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;


@ -21,7 +21,6 @@ import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import scala.Option;
import scala.reflect.ClassTag;


@ -22,7 +22,7 @@ import scala.collection.mutable.ArrayBuffer
import org.mockito.Matchers.{any, isA}
import org.mockito.Mockito._
import org.apache.spark.rpc.{RpcAddress, RpcCallContext, RpcEndpointRef, RpcEnv}
import org.apache.spark.rpc.{RpcAddress, RpcCallContext, RpcEnv}
import org.apache.spark.scheduler.{CompressedMapStatus, MapStatus}
import org.apache.spark.shuffle.FetchFailedException
import org.apache.spark.storage.{BlockManagerId, ShuffleBlockId}


@ -20,11 +20,8 @@ package org.apache.spark
import java.io.File
import javax.net.ssl.SSLContext
import com.google.common.io.Files
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.util.Utils
class SSLOptionsSuite extends SparkFunSuite with BeforeAndAfterAll {
test("test resolving property file as spark conf ") {


@ -17,11 +17,9 @@
package org.apache.spark
import java.util.concurrent.{Semaphore, TimeUnit}
import java.util.concurrent.Semaphore
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger}
import org.apache.spark.scheduler._
/**
* Holds state shared across task threads in some ThreadingSuite tests.
*/


@ -22,7 +22,7 @@ import java.net.URL
import scala.collection.mutable
import scala.io.Source
import org.apache.spark.{LocalSparkContext, SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.{LocalSparkContext, SparkContext, SparkFunSuite}
import org.apache.spark.scheduler.{SparkListener, SparkListenerExecutorAdded}
import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.util.SparkConfWithEnv


@ -19,7 +19,6 @@ package org.apache.spark.deploy.client
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import org.scalatest.BeforeAndAfterAll


@ -24,16 +24,14 @@ import java.util.concurrent.TimeUnit
import java.util.zip.{ZipInputStream, ZipOutputStream}
import scala.concurrent.duration._
import scala.io.Source
import scala.language.postfixOps
import com.google.common.base.Charsets
import com.google.common.io.{ByteStreams, Files}
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hdfs.DistributedFileSystem
import org.json4s.jackson.JsonMethods._
import org.mockito.Matchers.any
import org.mockito.Mockito.{doReturn, mock, spy, verify, when}
import org.mockito.Mockito.{mock, spy, verify}
import org.scalatest.BeforeAndAfter
import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually._
@ -45,8 +43,6 @@ import org.apache.spark.util.{Clock, JsonProtocol, ManualClock, Utils}
class FsHistoryProviderSuite extends SparkFunSuite with BeforeAndAfter with Matchers with Logging {
import FsHistoryProvider._
private var testDir: File = null
before {


@ -24,7 +24,7 @@ import java.io.FileOutputStream
import scala.collection.immutable.IndexedSeq
import org.apache.hadoop.io.Text
import org.apache.hadoop.io.compress.{CompressionCodecFactory, DefaultCodec, GzipCodec}
import org.apache.hadoop.io.compress.{CompressionCodecFactory, GzipCodec}
import org.scalatest.BeforeAndAfterAll
import org.apache.spark.{Logging, SparkConf, SparkContext, SparkFunSuite}


@ -26,7 +26,6 @@ import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually._
import org.apache.spark._
import org.apache.spark.launcher._
class LauncherBackendSuite extends SparkFunSuite with Matchers {


@ -17,10 +17,8 @@
package org.apache.spark.memory
import scala.collection.mutable
import org.apache.spark.SparkConf
import org.apache.spark.storage.{BlockId, BlockStatus}
import org.apache.spark.storage.BlockId
class TestMemoryManager(conf: SparkConf)
extends MemoryManager(conf, numCores = 1, Long.MaxValue, Long.MaxValue) {


@ -20,7 +20,7 @@ package org.apache.spark.rpc
import java.io.{File, NotSerializableException}
import java.nio.charset.StandardCharsets.UTF_8
import java.util.UUID
import java.util.concurrent.{ConcurrentLinkedQueue, CountDownLatch, TimeoutException, TimeUnit}
import java.util.concurrent.{ConcurrentLinkedQueue, CountDownLatch, TimeUnit}
import scala.collection.mutable
import scala.collection.JavaConverters._


@ -23,7 +23,7 @@ import java.util.concurrent.atomic.AtomicInteger
import org.mockito.Mockito._
import org.apache.spark.SparkFunSuite
import org.apache.spark.rpc.{RpcAddress, RpcEndpoint, RpcEnv, TestRpcEndpoint}
import org.apache.spark.rpc.{RpcAddress, TestRpcEndpoint}
class InboxSuite extends SparkFunSuite {


@ -33,7 +33,7 @@ import org.scalatest.BeforeAndAfter
import org.apache.spark.{LocalSparkContext, SecurityManager, SparkConf, SparkContext, SparkFunSuite}
import org.apache.spark.network.shuffle.mesos.MesosExternalShuffleClient
import org.apache.spark.rpc.{RpcEndpointRef}
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler.TaskSchedulerImpl
class CoarseMesosSchedulerBackendSuite extends SparkFunSuite


@ -21,7 +21,6 @@ import java.net.{BindException, ServerSocket}
import scala.io.Source
import org.eclipse.jetty.server.Server
import org.eclipse.jetty.servlet.ServletContextHandler
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._


@ -22,8 +22,6 @@ import java.util.Properties
import org.apache.commons.lang3.SerializationUtils
import org.scalatest.{BeforeAndAfterEach, Suite}
import org.apache.spark.SparkFunSuite
/**
* Mixin for automatically resetting system properties that are modified in ScalaTest tests.
* This resets the properties after each individual test.


@ -19,7 +19,7 @@ package org.apache.spark.util
import scala.collection.mutable.ArrayBuffer
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, PrivateMethodTester}
import org.scalatest.{BeforeAndAfterEach, PrivateMethodTester}
import org.apache.spark.SparkFunSuite


@ -17,12 +17,10 @@
package org.apache.spark.sql.jdbc
import java.math.BigDecimal
import java.sql.{Connection, Date, Timestamp}
import java.sql.Connection
import java.util.Properties
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.tags.DockerTest
/**


@ -28,7 +28,6 @@ import org.apache.spark.api.java.function.Function2;
import org.apache.spark.api.java.function.PairFunction;
import java.io.Serializable;
import java.util.Collections;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;


@ -25,10 +25,8 @@ import scala.Tuple2;
import org.apache.spark.api.java.*;
import org.apache.spark.mllib.evaluation.MultilabelMetrics;
import org.apache.spark.rdd.RDD;
import org.apache.spark.SparkConf;
// $example off$
import org.apache.spark.SparkContext;
public class JavaMultiLabelClassificationMetricsExample {
public static void main(String[] args) {


@ -23,7 +23,6 @@ import java.io.File
import scala.io.Source._
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
/**
* Simple test for reading and writing to a distributed


@ -21,7 +21,6 @@ package org.apache.spark.examples
import java.util.Random
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
/**
* Usage: GroupByTest [numMappers] [numKVPairs] [KeySize] [numReducers]


@ -25,8 +25,6 @@ import scala.collection.mutable.HashSet
import breeze.linalg.{squaredDistance, DenseVector, Vector}
import org.apache.spark.SparkContext._
/**
* K-means clustering.
*


@ -20,9 +20,6 @@ package org.apache.spark.examples
import scala.math.random
import org.apache.spark._
import org.apache.spark.SparkContext._
object LocalPi {
def main(args: Array[String]) {
var count = 0


@ -19,7 +19,6 @@
package org.apache.spark.examples
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
/**
* Executes a roll up-style query against Apache logs.


@ -21,7 +21,6 @@ package org.apache.spark.examples
import java.util.Random
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
/**
* Usage: SimpleSkewedGroupByTest [numMappers] [numKVPairs] [valSize] [numReducers] [ratio]


@ -21,7 +21,6 @@ package org.apache.spark.examples
import java.util.Random
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
/**
* Usage: GroupByTest [numMappers] [numKVPairs] [KeySize] [numReducers]


@ -21,7 +21,6 @@ package org.apache.spark.examples
import breeze.linalg.{squaredDistance, DenseVector, Vector}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
/**
* K-means clustering.


@ -19,7 +19,6 @@
package org.apache.spark.examples
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
/**
* Computes the PageRank of URLs from an input file. Input file should


@ -22,7 +22,6 @@ import scala.collection.mutable
import scala.util.Random
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
/**
* Transitive closure on a graph.


@ -21,7 +21,6 @@ package org.apache.spark.examples.graphx
import java.io.{FileOutputStream, PrintWriter}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
import org.apache.spark.graphx.{GraphXUtils, PartitionStrategy}
import org.apache.spark.graphx.util.GraphGenerators


@ -18,15 +18,13 @@
// scalastyle:off println
package org.apache.spark.examples.ml
import scala.collection.mutable
import scala.language.reflectiveCalls
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.examples.mllib.AbstractParams
import org.apache.spark.ml.{Pipeline, PipelineStage}
import org.apache.spark.ml.regression.{LinearRegression, LinearRegressionModel}
import org.apache.spark.ml.regression.LinearRegression
import org.apache.spark.sql.DataFrame
/**


@ -21,7 +21,6 @@ package org.apache.spark.examples.mllib
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.linalg.distributed.{MatrixEntry, RowMatrix}


@ -24,7 +24,6 @@ import org.apache.log4j.{Level, Logger}
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.recommendation.{ALS, MatrixFactorizationModel, Rating}
import org.apache.spark.rdd.RDD


@ -21,7 +21,6 @@ package org.apache.spark.examples.mllib
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.util.MLUtils
/**


@ -19,7 +19,6 @@
package org.apache.spark.examples.streaming
import scala.collection.mutable.LinkedHashSet
import scala.reflect.ClassTag
import scala.util.Random
import akka.actor._


@ -18,7 +18,7 @@
// scalastyle:off println
package org.apache.spark.examples.streaming
import java.io.{BufferedReader, InputStream, InputStreamReader}
import java.io.{BufferedReader, InputStreamReader}
import java.net.Socket
import org.apache.spark.{Logging, SparkConf}


@ -18,10 +18,7 @@
// scalastyle:off println
package org.apache.spark.examples.streaming
import java.net.InetSocketAddress
import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming._
import org.apache.spark.streaming.flume._
import org.apache.spark.util.IntParam


@ -24,7 +24,6 @@ import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SQLContext
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext, Time}
import org.apache.spark.util.IntParam
/**
* Use DataFrames and SQL to count words in UTF8 encoded, '\n' delimited text received from the


@ -18,7 +18,6 @@
// scalastyle:off println
package org.apache.spark.examples.streaming
import org.apache.spark.HashPartitioner
import org.apache.spark.SparkConf
import org.apache.spark.streaming._


@ -22,7 +22,6 @@ import com.twitter.algebird._
import com.twitter.algebird.CMSHasherImplicits._
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext._
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.twitter._


@ -19,7 +19,6 @@
package org.apache.spark.examples.streaming
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.SparkContext._
import org.apache.spark.streaming.twitter._
import org.apache.spark.SparkConf


@ -25,9 +25,8 @@ import akka.actor.actorRef2Scala
import akka.util.ByteString
import akka.zeromq._
import akka.zeromq.Subscribe
import com.typesafe.config.ConfigFactory
import org.apache.spark.{SparkConf, TaskContext}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.zeromq._


@ -20,7 +20,6 @@ package org.apache.spark.streaming
import java.io.{IOException, ObjectInputStream}
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import org.apache.spark.rdd.RDD


@ -23,7 +23,7 @@ import scala.language.postfixOps
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.SparkFunSuite
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Milliseconds, StreamingContext}


@ -22,8 +22,8 @@ import twitter4j.auth.Authorization
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.api.java.{JavaDStream, JavaReceiverInputDStream, JavaStreamingContext}
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.api.java.{JavaReceiverInputDStream, JavaStreamingContext}
import org.apache.spark.streaming.dstream.ReceiverInputDStream
object TwitterUtils {
/**


@ -17,15 +17,10 @@
package org.apache.spark.graphx
import scala.reflect.{classTag, ClassTag}
import scala.reflect.ClassTag
import scala.util.Random
import org.apache.spark.HashPartitioner
import org.apache.spark.SparkContext._
import org.apache.spark.SparkException
import org.apache.spark.graphx.impl.EdgePartitionBuilder
import org.apache.spark.graphx.impl.GraphImpl
import org.apache.spark.graphx.lib._
import org.apache.spark.rdd.RDD


@ -20,7 +20,6 @@ package org.apache.spark.graphx
import scala.reflect.ClassTag
import org.apache.spark._
import org.apache.spark.SparkContext._
import org.apache.spark.graphx.impl.RoutingTablePartition
import org.apache.spark.graphx.impl.ShippableVertexPartition
import org.apache.spark.graphx.impl.VertexAttributeBlock


@ -17,7 +17,7 @@
package org.apache.spark.graphx.impl
import scala.reflect.{classTag, ClassTag}
import scala.reflect.ClassTag
import org.apache.spark.graphx._
import org.apache.spark.graphx.util.collection.GraphXPrimitiveKeyOpenHashMap


@ -17,9 +17,8 @@
package org.apache.spark.graphx.impl
import scala.reflect.{classTag, ClassTag}
import scala.reflect.ClassTag
import org.apache.spark.SparkContext._
import org.apache.spark.graphx._
import org.apache.spark.rdd.RDD


@ -17,14 +17,8 @@
package org.apache.spark.graphx.impl
import scala.reflect.ClassTag
import org.apache.spark.Partitioner
import org.apache.spark.graphx._
import org.apache.spark.graphx.impl.RoutingTablePartition.RoutingTableMessage
import org.apache.spark.graphx.util.collection.GraphXPrimitiveKeyOpenHashMap
import org.apache.spark.rdd.RDD
import org.apache.spark.rdd.ShuffledRDD
import org.apache.spark.util.collection.{BitSet, PrimitiveVector}
private[graphx]


@ -20,7 +20,6 @@ package org.apache.spark.graphx.impl
import scala.reflect.ClassTag
import org.apache.spark.graphx._
import org.apache.spark.graphx.util.collection.GraphXPrimitiveKeyOpenHashMap
import org.apache.spark.util.collection.BitSet
private[graphx] object VertexPartition {

Some files were not shown because too many files have changed in this diff.