[SPARK-19227][SPARK-19251] remove unused imports and outdated comments

## What changes were proposed in this pull request?
Remove unused imports and outdated comments, and fix some minor code style issues.

## How was this patch tested?
Existing unit tests.

Author: uncleGen <hustyugm@gmail.com>

Closes #16591 from uncleGen/SPARK-19227.
Authored by uncleGen on 2017-01-18 09:44:32 +00:00; committed by Sean Owen
parent 4494cd9716
commit eefdf9f9dd
47 changed files with 25 additions and 79 deletions

@@ -26,11 +26,9 @@ import javax.net.ssl._
import com.google.common.hash.HashCodes
import com.google.common.io.Files
import org.apache.hadoop.io.Text
-import org.apache.hadoop.security.Credentials
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
-import org.apache.spark.internal.config._
import org.apache.spark.network.sasl.SecretKeyHolder
import org.apache.spark.util.Utils

@@ -19,7 +19,7 @@ package org.apache.spark
import java.io._
import java.lang.reflect.Constructor
-import java.net.{URI}
+import java.net.URI
import java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}
import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}

@@ -19,7 +19,7 @@ package org.apache.spark.deploy
import javax.annotation.concurrent.ThreadSafe
-import com.codahale.metrics.{Gauge, MetricRegistry}
+import com.codahale.metrics.MetricRegistry
import org.apache.spark.metrics.source.Source
import org.apache.spark.network.shuffle.ExternalShuffleBlockHandler

@@ -17,7 +17,7 @@
package org.apache.spark.deploy
-import java.io.{File, IOException, PrintStream}
+import java.io.{File, IOException}
import java.lang.reflect.{InvocationTargetException, Modifier, UndeclaredThrowableException}
import java.net.URL
import java.security.PrivilegedExceptionAction

@@ -22,8 +22,6 @@ import javax.servlet.http.HttpServletRequest
import scala.xml.{Node, Unparsed}
-import com.google.common.cache.{CacheBuilder, CacheLoader, LoadingCache}
import org.apache.spark.internal.Logging
import org.apache.spark.ui.{UIUtils, WebUIPage}
import org.apache.spark.util.Utils

@@ -17,12 +17,6 @@
package org.apache.spark.internal.config
-import java.util.{Map => JMap}
-import scala.util.matching.Regex
-import org.apache.spark.SparkConf
/**
* An entry contains all meta information for a configuration.
*
@@ -34,7 +28,6 @@ import org.apache.spark.SparkConf
* value declared as a string.
*
* @param key the key for the configuration
-* @param defaultValue the default value for the configuration
* @param valueConverter how to convert a string to the value. It should throw an exception if the
* string does not have the required format.
* @param stringConverter how to convert a value to a string that the user can use it as a valid
@@ -76,7 +69,7 @@ private class ConfigEntryWithDefault[T] (
stringConverter: T => String,
doc: String,
isPublic: Boolean)
extends ConfigEntry(key, valueConverter, stringConverter, doc, isPublic) {
override def defaultValue: Option[T] = Some(_defaultValue)
@@ -95,7 +88,7 @@ private class ConfigEntryWithDefaultString[T] (
stringConverter: T => String,
doc: String,
isPublic: Boolean)
extends ConfigEntry(key, valueConverter, stringConverter, doc, isPublic) {
override def defaultValue: Option[T] = Some(valueConverter(_defaultValue))
@@ -118,8 +111,8 @@ private[spark] class OptionalConfigEntry[T](
val rawStringConverter: T => String,
doc: String,
isPublic: Boolean)
extends ConfigEntry[Option[T]](key, s => Some(rawValueConverter(s)),
v => v.map(rawStringConverter).orNull, doc, isPublic) {
override def defaultValueString: String = "<undefined>"
@@ -137,7 +130,7 @@ private class FallbackConfigEntry[T] (
doc: String,
isPublic: Boolean,
private[config] val fallback: ConfigEntry[T])
extends ConfigEntry[T](key, fallback.valueConverter, fallback.stringConverter, doc, isPublic) {
override def defaultValueString: String = s"<value of ${fallback.key}>"
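
To make the `defaultValue` machinery documented above concrete, here is a minimal sketch of how such an entry is typically declared through Spark's internal `ConfigBuilder`. The key `spark.example.timeout` is hypothetical, and because this API is `private[spark]` the snippet assumes it lives in the same package:

```scala
package org.apache.spark.internal.config

object ExampleConfigs {
  // Hypothetical entry: `.intConf` selects the value/string converters and
  // `.createWithDefault` yields an entry with a default value (a
  // ConfigEntryWithDefault under the hood, per the classes above).
  val EXAMPLE_TIMEOUT = ConfigBuilder("spark.example.timeout")
    .doc("How long to wait before giving up, in seconds (illustrative only).")
    .intConf
    .createWithDefault(120)
}
```

At a call site, reading this entry back (via the internal `SparkConf.get(ConfigEntry)` accessor) would then return 120 whenever the key is not set explicitly.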

@@ -18,7 +18,6 @@
package org.apache.spark.internal.config
import java.util.{Map => JMap}
-import java.util.regex.Pattern
import scala.collection.mutable.HashMap
import scala.util.matching.Regex

@@ -19,11 +19,10 @@ package org.apache.spark.rpc
import java.util.concurrent.TimeoutException
-import scala.concurrent.{Await, Future}
+import scala.concurrent.Future
import scala.concurrent.duration._
-import scala.util.control.NonFatal
-import org.apache.spark.{SparkConf, SparkException}
+import org.apache.spark.SparkConf
import org.apache.spark.util.{ThreadUtils, Utils}
/**

@@ -24,7 +24,6 @@ import java.util.Properties
import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
-import org.apache.spark.executor.TaskMetrics
import org.apache.spark.rdd.RDD
/**

@@ -25,7 +25,6 @@ import scala.language.existentials
import org.apache.spark._
import org.apache.spark.broadcast.Broadcast
-import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.shuffle.ShuffleWriter

@@ -19,7 +19,6 @@ package org.apache.spark.scheduler
import scala.collection.mutable.HashSet
-import org.apache.spark._
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD

@@ -46,7 +46,6 @@ import org.apache.spark.util._
* @param stageId id of the stage this task belongs to
* @param stageAttemptId attempt id of the stage this task belongs to
* @param partitionId index of the number in the RDD
-* @param metrics a `TaskMetrics` that is created at driver side and sent to executor side.
* @param localProperties copy of thread-local properties set by the user on the driver side.
* @param serializedTaskMetrics a `TaskMetrics` that is created and serialized on the driver side
* and sent to executor side.

@@ -23,7 +23,6 @@ import javax.annotation.concurrent.NotThreadSafe
import scala.reflect.ClassTag
-import org.apache.spark.SparkEnv
import org.apache.spark.annotation.{DeveloperApi, Private}
import org.apache.spark.util.NextIterator

@@ -23,7 +23,6 @@ import java.nio.ByteBuffer
import scala.reflect.ClassTag
import org.apache.spark.SparkConf
-import org.apache.spark.internal.config._
import org.apache.spark.io.CompressionCodec
import org.apache.spark.security.CryptoStreamUtils
import org.apache.spark.storage._

@@ -16,7 +16,7 @@
*/
package org.apache.spark.status.api.v1
-import javax.ws.rs.{GET, PathParam, Produces}
+import javax.ws.rs.{GET, Produces}
import javax.ws.rs.core.MediaType
import org.apache.spark.ui.SparkUI

@@ -31,7 +31,7 @@ import org.apache.spark.util.Utils
* ExternalBlockStore, whether to keep the data in memory in a serialized format, and whether
* to replicate the RDD partitions on multiple nodes.
*
-* The [[org.apache.spark.storage.StorageLevel$]] singleton object contains some static constants
+* The [[org.apache.spark.storage.StorageLevel]] singleton object contains some static constants
* for commonly useful storage levels. To create your own storage level object, use the
* factory method of the singleton object (`StorageLevel(...)`).
*/
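
For context, this is roughly how the constants and the `StorageLevel(...)` factory described in that comment are used (a minimal sketch; the named parameters follow the public `StorageLevel.apply` overload, and `rdd` stands in for any existing RDD):

```scala
import org.apache.spark.storage.StorageLevel

// A predefined constant from the singleton object.
val memoryOnly = StorageLevel.MEMORY_ONLY

// A custom level built with the factory method: disk + memory,
// kept serialized in memory, replicated to two nodes.
val custom = StorageLevel(
  useDisk = true, useMemory = true, useOffHeap = false,
  deserialized = false, replication = 2)

// Typical call site (assuming `rdd` is an existing RDD):
// rdd.persist(custom)
```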

@@ -19,7 +19,6 @@ package org.apache.spark.util.random
import java.util.Random
-import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
import org.apache.commons.math3.distribution.PoissonDistribution

@@ -17,7 +17,6 @@
package org.apache.spark
-import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.executor.TaskMetrics

@@ -23,7 +23,6 @@ import org.mockito.Mockito.mock
import org.scalatest._
import org.apache.spark.{SecurityManager, SparkConf, SparkFunSuite}
-import org.apache.spark.internal.config._
import org.apache.spark.network.BlockDataManager
class NettyBlockTransferServiceSuite

@@ -21,10 +21,6 @@ import org.scalatest.Matchers
import org.apache.spark.SparkFunSuite
-/**
-*
-*/
class DistributionSuite extends SparkFunSuite with Matchers {
test("summary") {
val d = new Distribution((1 to 100).toArray.map{_.toDouble})

@@ -21,7 +21,7 @@ package org.apache.spark.examples.ml
// $example on$
import org.apache.spark.ml.feature.Binarizer
// $example off$
-import org.apache.spark.sql.{SparkSession}
+import org.apache.spark.sql.SparkSession
object BinarizerExample {
def main(args: Array[String]): Unit = {

@@ -16,10 +16,6 @@
*/
package org.apache.spark.examples.sql
-// $example on:schema_inferring$
-import org.apache.spark.sql.catalyst.encoders.ExpressionEncoder
-import org.apache.spark.sql.Encoder
-// $example off:schema_inferring$
import org.apache.spark.sql.Row
// $example on:init_session$
import org.apache.spark.sql.SparkSession

@@ -18,7 +18,6 @@
// scalastyle:off println
package org.apache.spark.examples.sql.streaming
-import org.apache.spark.sql.functions._
import org.apache.spark.sql.SparkSession
/**

@@ -24,7 +24,7 @@ import java.nio.charset.StandardCharsets
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
-import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, KafkaConsumer, OffsetOutOfRangeException}
+import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, KafkaConsumer}
import org.apache.kafka.clients.consumer.internals.NoOpConsumerRebalanceListener
import org.apache.kafka.common.TopicPartition

@@ -24,7 +24,7 @@ import scala.collection.mutable.ArrayBuffer
import org.apache.kafka.clients.consumer.{ ConsumerConfig, ConsumerRecord }
import org.apache.kafka.common.TopicPartition
-import org.apache.spark.{Partition, SparkContext, SparkException, TaskContext}
+import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.partial.{BoundedDouble, PartialResult}
import org.apache.spark.rdd.RDD

@@ -25,7 +25,6 @@ import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkContext
import org.apache.spark.annotation.Experimental
import org.apache.spark.api.java.{ JavaRDD, JavaSparkContext }
-import org.apache.spark.api.java.function.{ Function0 => JFunction0 }
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.StreamingContext

@@ -22,7 +22,7 @@ import java.nio.ByteBuffer
import scala.util.Random
-import com.amazonaws.auth.{BasicAWSCredentials, DefaultAWSCredentialsProviderChain}
+import com.amazonaws.auth.DefaultAWSCredentialsProviderChain
import com.amazonaws.regions.RegionUtils
import com.amazonaws.services.kinesis.AmazonKinesisClient
import com.amazonaws.services.kinesis.clientlibrary.lib.worker.InitialPositionInStream

@@ -26,7 +26,7 @@ import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason
import org.apache.spark.internal.Logging
import org.apache.spark.streaming.Duration
import org.apache.spark.streaming.util.RecurringTimer
-import org.apache.spark.util.{Clock, SystemClock, ThreadUtils}
+import org.apache.spark.util.{Clock, SystemClock}
/**
* This is a helper class for managing Kinesis checkpointing.

@@ -17,7 +17,7 @@
package org.apache.spark.streaming.kinesis
-import java.util.concurrent.{ExecutorService, TimeoutException}
+import java.util.concurrent.TimeoutException
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
@@ -30,7 +30,6 @@ import org.mockito.invocation.InvocationOnMock
import org.mockito.stubbing.Answer
import org.scalatest.{BeforeAndAfterEach, PrivateMethodTester}
import org.scalatest.concurrent.Eventually
-import org.scalatest.concurrent.Eventually._
import org.scalatest.mock.MockitoSugar
import org.apache.spark.streaming.{Duration, TestSuiteBase}

@@ -23,11 +23,8 @@ import java.util.List;
import org.junit.Before;
import org.junit.Test;
-import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
-import static org.apache.spark.launcher.SparkSubmitOptionParser.*;
public class SparkSubmitOptionParserSuite extends BaseSuite {
private SparkSubmitOptionParser parser;

@@ -27,7 +27,7 @@ import org.apache.spark.ml.util.{MetadataUtils, SchemaUtils}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions._
-import org.apache.spark.sql.types.{DataType, DoubleType, StructType}
+import org.apache.spark.sql.types.{DataType, StructType}
/**
* (private[spark]) Params for classification.

@@ -37,7 +37,6 @@ import org.apache.spark.mllib.tree.model.{GradientBoostedTreesModel => OldGBTMod
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row}
import org.apache.spark.sql.functions._
-import org.apache.spark.sql.types.DoubleType
/**
* Gradient-Boosted Trees (GBTs) (http://en.wikipedia.org/wiki/Gradient_boosting)

@@ -25,7 +25,7 @@ import org.apache.hadoop.mapreduce.{Job, TaskAttemptContext}
import org.apache.spark.TaskContext
import org.apache.spark.ml.feature.LabeledPoint
-import org.apache.spark.ml.linalg.{Vector, Vectors, VectorUDT}
+import org.apache.spark.ml.linalg.{Vectors, VectorUDT}
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.InternalRow

@@ -17,8 +17,8 @@
package org.apache.spark.repl
-import scala.tools.nsc.{Settings, CompilerCommand}
+import scala.tools.nsc.{CompilerCommand, Settings}
-import scala.Predef._
import org.apache.spark.annotation.DeveloperApi
/**

@@ -10,8 +10,6 @@ package org.apache.spark.repl
import scala.tools.nsc._
import scala.tools.nsc.interpreter._
-import scala.reflect.internal.util.Position
-import scala.util.control.Exception.ignoring
import scala.tools.nsc.util.stackTraceString
import org.apache.spark.SPARK_VERSION

@@ -19,8 +19,6 @@ package org.apache.spark.deploy.yarn
import scala.collection.mutable.ArrayBuffer
-import org.apache.spark.util.{IntParam, MemoryParam}
class ApplicationMasterArguments(val args: Array[String]) {
var userJar: String = null
var userClass: String = null

@@ -18,10 +18,9 @@
package org.apache.spark.deploy.yarn.security
import org.apache.hadoop.conf.Configuration
-import org.apache.hadoop.fs.Path
import org.scalatest.{Matchers, PrivateMethodTester}
-import org.apache.spark.{SparkConf, SparkException, SparkFunSuite}
+import org.apache.spark.{SparkException, SparkFunSuite}
class HadoopFSCredentialProviderSuite
extends SparkFunSuite

@@ -17,7 +17,6 @@
package org.apache.spark.sql.catalyst.expressions
-import org.apache.spark.TaskContext
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.types.{DataType, LongType}

@@ -21,7 +21,6 @@ import java.nio.ByteBuffer
import com.google.common.primitives.{Doubles, Ints, Longs}
-import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.{TypeCheckFailure, TypeCheckSuccess}

@@ -20,7 +20,6 @@ package org.apache.spark.sql.catalyst.expressions.aggregate
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream}
import java.util
-import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult.{TypeCheckFailure, TypeCheckSuccess}

@@ -17,8 +17,6 @@
package org.apache.spark.sql.catalyst.expressions.aggregate
-import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataInputStream, DataOutputStream}
import scala.collection.generic.Growable
import scala.collection.mutable

@@ -23,7 +23,6 @@ import scala.util.parsing.combinator.RegexParsers
import com.fasterxml.jackson.core._
-import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.CodegenFallback
import org.apache.spark.sql.catalyst.InternalRow

@@ -18,7 +18,7 @@
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.InternalRow
-import org.apache.spark.sql.catalyst.analysis.{TypeCheckResult, TypeCoercion}
+import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.catalyst.util.TypeUtils
import org.apache.spark.sql.types._

@@ -17,15 +17,11 @@
package org.apache.spark.sql.catalyst.planning
-import scala.annotation.tailrec
-import scala.collection.mutable
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
-import org.apache.spark.sql.types.IntegerType
/**
* A pattern that matches any number of project or filter operations on top of another relational

@@ -17,7 +17,6 @@
package org.apache.spark.sql.catalyst.plans
-import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.expressions.Attribute
object JoinType {

@@ -17,7 +17,7 @@
package org.apache.spark.sql.catalyst.plans.logical
-import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression}
+import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.types.MetadataBuilder
import org.apache.spark.unsafe.types.CalendarInterval

@@ -36,7 +36,6 @@ import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.expressions.{AttributeMap, AttributeReference, Expression}
import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, Statistics}
import org.apache.spark.sql.execution.FileRelation
-import org.apache.spark.sql.hive.client.HiveClient
import org.apache.spark.sql.types.StructField