Revert "[SPARK-12692][BUILD][SQL] Scala style: Fix the style violation (Space before "," or ":")"

This reverts commit 8cfa218f4f.
Reynold Xin 2016-01-12 12:56:52 -08:00
parent 0ed430e315
commit 0d543b98f3
54 changed files with 141 additions and 150 deletions


@ -218,7 +218,7 @@ This file is divided into 3 sections:
<check level="error" class="org.scalastyle.scalariform.EqualsHashCodeChecker" enabled="false"></check>
<!-- Should turn this on, but we have a few places that need to be fixed first -->
<check customId="whitespacebeforetoken" level="warning" class="org.scalastyle.scalariform.DisallowSpaceBeforeTokenChecker" enabled="true">
<check level="warning" class="org.scalastyle.scalariform.DisallowSpaceBeforeTokenChecker" enabled="true">
<parameters>
<parameter name="tokens">COLON, COMMA</parameter>
</parameters>
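For context: the version being reverted away from carries customId="whitespacebeforetoken", which gives this rule an id that targeted // scalastyle:off whitespacebeforetoken comments can reference, and the revert drops those markers further down along with the attribute. With the COLON and COMMA tokens configured, the checker warns on any space placed immediately before ':' or ','. A minimal illustrative sketch (not part of this commit) of what it flags versus accepts:

object WhitespaceBeforeTokenExamples {
  // Flagged by the checker: space before ':' and before ','.
  def bad(x : Int, y : Int) : Int = math.max(x , y)

  // Accepted: no space before ':' or ','.
  def good(x: Int, y: Int): Int = math.max(x, y)
}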


@ -49,7 +49,7 @@ object ScalaReflection extends ScalaReflection {
* Unlike `schemaFor`, this function doesn't do any massaging of types into the Spark SQL type
* system. As a result, ObjectType will be returned for things like boxed Integers
*/
def dataTypeFor[T: TypeTag]: DataType = dataTypeFor(localTypeOf[T])
def dataTypeFor[T : TypeTag]: DataType = dataTypeFor(localTypeOf[T])
private def dataTypeFor(tpe: `Type`): DataType = ScalaReflectionLock.synchronized {
tpe match {
@ -116,7 +116,7 @@ object ScalaReflection extends ScalaReflection {
* from ordinal 0 (since there are no names to map to). The actual location can be moved by
* calling resolve/bind with a new schema.
*/
def constructorFor[T: TypeTag]: Expression = {
def constructorFor[T : TypeTag]: Expression = {
val tpe = localTypeOf[T]
val clsName = getClassNameFromType(tpe)
val walkedTypePath = s"""- root class: "${clsName}"""" :: Nil
@ -386,7 +386,7 @@ object ScalaReflection extends ScalaReflection {
* * the element type of [[Array]] or [[Seq]]: `array element class: "abc.xyz.MyClass"`
* * the field of [[Product]]: `field (class: "abc.xyz.MyClass", name: "myField")`
*/
def extractorsFor[T: TypeTag](inputObject: Expression): CreateNamedStruct = {
def extractorsFor[T : TypeTag](inputObject: Expression): CreateNamedStruct = {
val tpe = localTypeOf[T]
val clsName = getClassNameFromType(tpe)
val walkedTypePath = s"""- root class: "${clsName}"""" :: Nil
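The [T: TypeTag] versus [T : TypeTag] pairs in this file differ only in spacing; both declare the same context bound, which desugars to an implicit evidence parameter. An illustrative sketch under that reading (the method names here are hypothetical, not from this file):

import scala.reflect.runtime.universe.TypeTag

object ContextBoundExample {
  // `T: TypeTag` is shorthand for taking an implicit TypeTag[T].
  def describe[T: TypeTag]: String = implicitly[TypeTag[T]].tpe.toString

  // Equivalent desugared form with the evidence parameter spelled out.
  def describeExplicit[T](implicit ev: TypeTag[T]): String = ev.tpe.toString
}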


@ -203,7 +203,7 @@ object SqlParser extends AbstractSparkSQLParser with DataTypeParser {
)
protected lazy val ordering: Parser[Seq[SortOrder]] =
( rep1sep(expression ~ direction.?, ",") ^^ {
( rep1sep(expression ~ direction.? , ",") ^^ {
case exps => exps.map(pair => SortOrder(pair._1, pair._2.getOrElse(Ascending)))
}
)


@ -84,7 +84,7 @@ class Analyzer(
ResolveAggregateFunctions ::
DistinctAggregationRewriter(conf) ::
HiveTypeCoercion.typeCoercionRules ++
extendedResolutionRules: _*),
extendedResolutionRules : _*),
Batch("Nondeterministic", Once,
PullOutNondeterministic),
Batch("UDF", Once,
@ -110,7 +110,7 @@ class Analyzer(
// Taking into account the reasonableness and the implementation complexity,
// here use the CTE definition first, check table name only and ignore database name
// see https://github.com/apache/spark/pull/4929#discussion_r27186638 for more info
case u: UnresolvedRelation =>
case u : UnresolvedRelation =>
val substituted = cteRelations.get(u.tableIdentifier.table).map { relation =>
val withAlias = u.alias.map(Subquery(_, relation))
withAlias.getOrElse(relation)
@ -889,7 +889,7 @@ class Analyzer(
_.transform {
// Extracts children expressions of a WindowFunction (input parameters of
// a WindowFunction).
case wf: WindowFunction =>
case wf : WindowFunction =>
val newChildren = wf.children.map(extractExpr)
wf.withNewChildren(newChildren)


@ -323,13 +323,13 @@ object FunctionRegistry {
} else {
// Otherwise, find an ctor method that matches the number of arguments, and use that.
val params = Seq.fill(expressions.size)(classOf[Expression])
val f = Try(tag.runtimeClass.getDeclaredConstructor(params: _*)) match {
val f = Try(tag.runtimeClass.getDeclaredConstructor(params : _*)) match {
case Success(e) =>
e
case Failure(e) =>
throw new AnalysisException(s"Invalid number of arguments for function $name")
}
Try(f.newInstance(expressions: _*).asInstanceOf[Expression]) match {
Try(f.newInstance(expressions : _*).asInstanceOf[Expression]) match {
case Success(e) => e
case Failure(e) => throw new AnalysisException(e.getMessage)
}
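The params: _* and expressions: _* calls above use Scala's sequence-argument notation, which expands a Seq into a varargs parameter; only the space before the ':' is what the style rule cares about. A small hypothetical sketch:

object VarargsExample {
  def sum(xs: Int*): Int = xs.sum

  val nums = Seq(1, 2, 3)
  val preferred = sum(nums: _*)  // no space before ':', accepted by the checker
  val flagged   = sum(nums : _*) // same meaning, but the space before ':' is flagged
}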


@ -529,7 +529,7 @@ object HiveTypeCoercion {
if falseValues.contains(value) => And(IsNotNull(bool), Not(bool))
case EqualTo(left @ BooleanType(), right @ NumericType()) =>
transform(left, right)
transform(left , right)
case EqualTo(left @ NumericType(), right @ BooleanType()) =>
transform(right, left)
case EqualNullSafe(left @ BooleanType(), right @ NumericType()) =>


@ -61,11 +61,9 @@ package object dsl {
trait ImplicitOperators {
def expr: Expression
// scalastyle:off whitespacebeforetoken
def unary_- : Expression = UnaryMinus(expr)
def unary_! : Predicate = Not(expr)
def unary_~ : Expression = BitwiseNot(expr)
// scalastyle:on whitespacebeforetoken
def + (other: Expression): Expression = Add(expr, other)
def - (other: Expression): Expression = Subtract(expr, other)
@ -143,7 +141,7 @@ package object dsl {
// Note that if we make ExpressionConversions an object rather than a trait, we can
// then make this a value class to avoid the small penalty of runtime instantiation.
def $(args: Any*): analysis.UnresolvedAttribute = {
analysis.UnresolvedAttribute(sc.s(args: _*))
analysis.UnresolvedAttribute(sc.s(args : _*))
}
}
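The // scalastyle:off whitespacebeforetoken and scalastyle:on markers removed above existed because the space in def unary_- : Expression cannot simply be deleted: a Scala identifier ending in an operator character would absorb the following ':' as part of the name. A hypothetical minimal example:

class Meters(val value: Double) {
  // The space before ':' is required here; `unary_-:` would be lexed as a
  // single identifier and the definition would not compile.
  def unary_- : Meters = new Meters(-value)
}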


@ -44,7 +44,7 @@ import org.apache.spark.util.Utils
* to the name `value`.
*/
object ExpressionEncoder {
def apply[T: TypeTag](): ExpressionEncoder[T] = {
def apply[T : TypeTag](): ExpressionEncoder[T] = {
// We convert the not-serializable TypeTag into StructType and ClassTag.
val mirror = typeTag[T].mirror
val cls = mirror.runtimeClass(typeTag[T].tpe)


@ -27,7 +27,7 @@ package object encoders {
* references from a specific schema.) This requirement allows us to preserve whether a given
* object type is being bound by name or by ordinal when doing resolution.
*/
private[sql] def encoderFor[A: Encoder]: ExpressionEncoder[A] = implicitly[Encoder[A]] match {
private[sql] def encoderFor[A : Encoder]: ExpressionEncoder[A] = implicitly[Encoder[A]] match {
case e: ExpressionEncoder[A] =>
e.assertUnresolved()
e


@ -164,7 +164,7 @@ abstract class Expression extends TreeNode[Expression] {
* Returns the hash for this expression. Expressions that compute the same result, even if
* they differ cosmetically should return the same hash.
*/
def semanticHash(): Int = {
def semanticHash() : Int = {
def computeHash(e: Seq[Any]): Int = {
// See http://stackoverflow.com/questions/113511/hash-code-implementation
var hash: Int = 17


@ -46,7 +46,7 @@ case class Concat(children: Seq[Expression]) extends Expression with ImplicitCas
override def eval(input: InternalRow): Any = {
val inputs = children.map(_.eval(input).asInstanceOf[UTF8String])
UTF8String.concat(inputs: _*)
UTF8String.concat(inputs : _*)
}
override protected def genCode(ctx: CodeGenContext, ev: GeneratedExpressionCode): String = {
@ -99,7 +99,7 @@ case class ConcatWs(children: Seq[Expression])
case null => Iterator(null.asInstanceOf[UTF8String])
}
}
UTF8String.concatWs(flatInputs.head, flatInputs.tail: _*)
UTF8String.concatWs(flatInputs.head, flatInputs.tail : _*)
}
override protected def genCode(ctx: CodeGenContext, ev: GeneratedExpressionCode): String = {
@ -990,7 +990,7 @@ case class FormatNumber(x: Expression, d: Expression)
def typeHelper(p: String): String = {
x.dataType match {
case _: DecimalType => s"""$p.toJavaBigDecimal()"""
case _ : DecimalType => s"""$p.toJavaBigDecimal()"""
case _ => s"$p"
}
}


@ -496,7 +496,7 @@ case class MapPartitions[T, U](
/** Factory for constructing new `AppendColumn` nodes. */
object AppendColumns {
def apply[T, U: Encoder](
def apply[T, U : Encoder](
func: T => U,
tEncoder: ExpressionEncoder[T],
child: LogicalPlan): AppendColumns[T, U] = {
@ -522,7 +522,7 @@ case class AppendColumns[T, U](
/** Factory for constructing new `MapGroups` nodes. */
object MapGroups {
def apply[K, T, U: Encoder](
def apply[K, T, U : Encoder](
func: (K, Iterator[T]) => TraversableOnce[U],
kEncoder: ExpressionEncoder[K],
tEncoder: ExpressionEncoder[T],
@ -557,7 +557,7 @@ case class MapGroups[K, T, U](
/** Factory for constructing new `CoGroup` nodes. */
object CoGroup {
def apply[Key, Left, Right, Result: Encoder](
def apply[Key, Left, Right, Result : Encoder](
func: (Key, Iterator[Left], Iterator[Right]) => TraversableOnce[Result],
keyEnc: ExpressionEncoder[Key],
leftEnc: ExpressionEncoder[Left],


@ -122,7 +122,7 @@ object NumberConverter {
* unsigned, otherwise it is signed.
* NB: This logic is borrowed from org.apache.hadoop.hive.ql.ud.UDFConv
*/
def convert(n: Array[Byte], fromBase: Int, toBase: Int ): UTF8String = {
def convert(n: Array[Byte] , fromBase: Int, toBase: Int ): UTF8String = {
if (fromBase < Character.MIN_RADIX || fromBase > Character.MAX_RADIX
|| Math.abs(toBase) < Character.MIN_RADIX
|| Math.abs(toBase) > Character.MAX_RADIX) {


@ -90,7 +90,7 @@ case class ArrayType(elementType: DataType, containsNull: Boolean) extends DataT
private[sql] lazy val interpretedOrdering: Ordering[ArrayData] = new Ordering[ArrayData] {
private[this] val elementOrdering: Ordering[Any] = elementType match {
case dt: AtomicType => dt.ordering.asInstanceOf[Ordering[Any]]
case a: ArrayType => a.interpretedOrdering.asInstanceOf[Ordering[Any]]
case a : ArrayType => a.interpretedOrdering.asInstanceOf[Ordering[Any]]
case s: StructType => s.interpretedOrdering.asInstanceOf[Ordering[Any]]
case other =>
throw new IllegalArgumentException(s"Type $other does not support ordered operations")


@ -310,7 +310,6 @@ final class Decimal extends Ordered[Decimal] with Serializable {
def remainder(that: Decimal): Decimal = this % that
// scalastyle:off whitespacebeforetoken
def unary_- : Decimal = {
if (decimalVal.ne(null)) {
Decimal(-decimalVal, precision, scale)
@ -318,7 +317,6 @@ final class Decimal extends Ordered[Decimal] with Serializable {
Decimal(-longVal, precision, scale)
}
}
// scalastyle:on whitespacebeforetoken
def abs: Decimal = if (this.compare(Decimal.ZERO) < 0) this.unary_- else this


@ -98,5 +98,5 @@ class EncoderErrorMessageSuite extends SparkFunSuite {
s"""array element class: "${clsName[NonEncodable]}""""))
}
private def clsName[T: ClassTag]: String = implicitly[ClassTag[T]].runtimeClass.getName
private def clsName[T : ClassTag]: String = implicitly[ClassTag[T]].runtimeClass.getName
}


@ -80,7 +80,7 @@ class JavaSerializable(val value: Int) extends Serializable {
class ExpressionEncoderSuite extends SparkFunSuite {
OuterScopes.outerScopes.put(getClass.getName, this)
implicit def encoder[T: TypeTag]: ExpressionEncoder[T] = ExpressionEncoder()
implicit def encoder[T : TypeTag]: ExpressionEncoder[T] = ExpressionEncoder()
// test flat encoders
encodeDecodeTest(false, "primitive boolean")
@ -145,7 +145,7 @@ class ExpressionEncoderSuite extends SparkFunSuite {
encoderFor(Encoders.javaSerialization[JavaSerializable]))
// test product encoders
private def productTest[T <: Product: ExpressionEncoder](input: T): Unit = {
private def productTest[T <: Product : ExpressionEncoder](input: T): Unit = {
encodeDecodeTest(input, input.getClass.getSimpleName)
}
@ -286,7 +286,7 @@ class ExpressionEncoderSuite extends SparkFunSuite {
}
}
private def encodeDecodeTest[T: ExpressionEncoder](
private def encodeDecodeTest[T : ExpressionEncoder](
input: T,
testName: String): Unit = {
test(s"encode/decode for $testName: $input") {


@ -80,7 +80,7 @@ class BooleanSimplificationSuite extends PlanTest with PredicateHelper {
checkCondition(('a < 2 || 'a > 3 || 'b > 5) && 'a < 2, 'a < 2)
checkCondition('a < 2 && ('a < 2 || 'a > 3 || 'b > 5), 'a < 2)
checkCondition('a < 2 && ('a < 2 || 'a > 3 || 'b > 5) , 'a < 2)
checkCondition(('a < 2 || 'b > 3) && ('a < 2 || 'c > 5), 'a < 2 || ('b > 3 && 'c > 5))


@ -152,7 +152,7 @@ class Column(protected[sql] val expr: Expression) extends Logging {
* results into the correct JVM types.
* @since 1.6.0
*/
def as[U: Encoder]: TypedColumn[Any, U] = new TypedColumn[Any, U](expr, encoderFor[U])
def as[U : Encoder]: TypedColumn[Any, U] = new TypedColumn[Any, U](expr, encoderFor[U])
/**
* Extracts a value or values from a complex type.
@ -171,7 +171,6 @@ class Column(protected[sql] val expr: Expression) extends Logging {
UnresolvedExtractValue(expr, lit(extraction).expr)
}
// scalastyle:off whitespacebeforetoken
/**
* Unary minus, i.e. negate the expression.
* {{{
@ -203,7 +202,6 @@ class Column(protected[sql] val expr: Expression) extends Logging {
* @since 1.3.0
*/
def unary_! : Column = withExpr { Not(expr) }
// scalastyle:on whitespacebeforetoken
/**
* Equality test.


@ -204,7 +204,7 @@ class DataFrame private[sql](
* @since 1.6.0
*/
@Experimental
def as[U: Encoder]: Dataset[U] = new Dataset[U](sqlContext, logicalPlan)
def as[U : Encoder]: Dataset[U] = new Dataset[U](sqlContext, logicalPlan)
/**
* Returns a new [[DataFrame]] with columns renamed. This can be quite convenient in conversion
@ -227,7 +227,7 @@ class DataFrame private[sql](
val newCols = logicalPlan.output.zip(colNames).map { case (oldAttribute, newName) =>
Column(oldAttribute).as(newName)
}
select(newCols: _*)
select(newCols : _*)
}
/**
@ -579,7 +579,7 @@ class DataFrame private[sql](
*/
@scala.annotation.varargs
def sortWithinPartitions(sortCol: String, sortCols: String*): DataFrame = {
sortWithinPartitions((sortCol +: sortCols).map(Column(_)): _*)
sortWithinPartitions((sortCol +: sortCols).map(Column(_)) : _*)
}
/**
@ -608,7 +608,7 @@ class DataFrame private[sql](
*/
@scala.annotation.varargs
def sort(sortCol: String, sortCols: String*): DataFrame = {
sort((sortCol +: sortCols).map(apply): _*)
sort((sortCol +: sortCols).map(apply) : _*)
}
/**
@ -631,7 +631,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
def orderBy(sortCol: String, sortCols: String*): DataFrame = sort(sortCol, sortCols: _*)
def orderBy(sortCol: String, sortCols: String*): DataFrame = sort(sortCol, sortCols : _*)
/**
* Returns a new [[DataFrame]] sorted by the given expressions.
@ -640,7 +640,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
def orderBy(sortExprs: Column*): DataFrame = sort(sortExprs: _*)
def orderBy(sortExprs: Column*): DataFrame = sort(sortExprs : _*)
/**
* Selects column based on the column name and return it as a [[Column]].
@ -720,7 +720,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
def select(col: String, cols: String*): DataFrame = select((col +: cols).map(Column(_)): _*)
def select(col: String, cols: String*): DataFrame = select((col +: cols).map(Column(_)) : _*)
/**
* Selects a set of SQL expressions. This is a variant of `select` that accepts
@ -948,7 +948,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
def agg(aggExpr: (String, String), aggExprs: (String, String)*): DataFrame = {
groupBy().agg(aggExpr, aggExprs: _*)
groupBy().agg(aggExpr, aggExprs : _*)
}
/**
@ -986,7 +986,7 @@ class DataFrame private[sql](
* @since 1.3.0
*/
@scala.annotation.varargs
def agg(expr: Column, exprs: Column*): DataFrame = groupBy().agg(expr, exprs: _*)
def agg(expr: Column, exprs: Column*): DataFrame = groupBy().agg(expr, exprs : _*)
/**
* Returns a new [[DataFrame]] by taking the first `n` rows. The difference between this function
@ -1118,7 +1118,7 @@ class DataFrame private[sql](
* @group dfops
* @since 1.3.0
*/
def explode[A <: Product: TypeTag](input: Column*)(f: Row => TraversableOnce[A]): DataFrame = {
def explode[A <: Product : TypeTag](input: Column*)(f: Row => TraversableOnce[A]): DataFrame = {
val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
val elementTypes = schema.toAttributes.map {
@ -1147,7 +1147,7 @@ class DataFrame private[sql](
* @group dfops
* @since 1.3.0
*/
def explode[A, B: TypeTag](inputColumn: String, outputColumn: String)(f: A => TraversableOnce[B])
def explode[A, B : TypeTag](inputColumn: String, outputColumn: String)(f: A => TraversableOnce[B])
: DataFrame = {
val dataType = ScalaReflection.schemaFor[B].dataType
val attributes = AttributeReference(outputColumn, dataType)() :: Nil
@ -1186,7 +1186,7 @@ class DataFrame private[sql](
Column(field)
}
}
select(columns: _*)
select(columns : _*)
} else {
select(Column("*"), col.as(colName))
}
@ -1207,7 +1207,7 @@ class DataFrame private[sql](
Column(field)
}
}
select(columns: _*)
select(columns : _*)
} else {
select(Column("*"), col.as(colName, metadata))
}
@ -1231,7 +1231,7 @@ class DataFrame private[sql](
Column(col)
}
}
select(columns: _*)
select(columns : _*)
} else {
this
}
@ -1244,7 +1244,7 @@ class DataFrame private[sql](
* @since 1.4.0
*/
def drop(colName: String): DataFrame = {
drop(Seq(colName): _*)
drop(Seq(colName) : _*)
}
/**
@ -1283,7 +1283,7 @@ class DataFrame private[sql](
val colsAfterDrop = attrs.filter { attr =>
attr != expression
}.map(attr => Column(attr))
select(colsAfterDrop: _*)
select(colsAfterDrop : _*)
}
/**
@ -1479,7 +1479,7 @@ class DataFrame private[sql](
* @group action
* @since 1.6.0
*/
def takeAsList(n: Int): java.util.List[Row] = java.util.Arrays.asList(take(n): _*)
def takeAsList(n: Int): java.util.List[Row] = java.util.Arrays.asList(take(n) : _*)
/**
* Returns an array that contains all of [[Row]]s in this [[DataFrame]].
@ -1505,7 +1505,7 @@ class DataFrame private[sql](
*/
def collectAsList(): java.util.List[Row] = withCallback("collectAsList", this) { _ =>
withNewExecutionId {
java.util.Arrays.asList(rdd.collect(): _*)
java.util.Arrays.asList(rdd.collect() : _*)
}
}


@ -33,5 +33,5 @@ case class DataFrameHolder private[sql](private val df: DataFrame) {
// `rdd.toDF("1")` as invoking this toDF and then apply on the returned DataFrame.
def toDF(): DataFrame = df
def toDF(colNames: String*): DataFrame = df.toDF(colNames: _*)
def toDF(colNames: String*): DataFrame = df.toDF(colNames : _*)
}


@ -164,7 +164,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
df.col(f.name)
}
}
df.select(projections: _*)
df.select(projections : _*)
}
/**
@ -191,7 +191,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
df.col(f.name)
}
}
df.select(projections: _*)
df.select(projections : _*)
}
/**
@ -364,7 +364,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
df.col(f.name)
}
}
df.select(projections: _*)
df.select(projections : _*)
}
private def fill0(values: Seq[(String, Any)]): DataFrame = {
@ -395,7 +395,7 @@ final class DataFrameNaFunctions private[sql](df: DataFrame) {
}
}.getOrElse(df.col(f.name))
}
df.select(projections: _*)
df.select(projections : _*)
}
/**


@ -203,7 +203,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
predicates: Array[String],
connectionProperties: Properties): DataFrame = {
val parts: Array[Partition] = predicates.zipWithIndex.map { case (part, i) =>
JDBCPartition(part, i): Partition
JDBCPartition(part, i) : Partition
}
jdbc(url, table, parts, connectionProperties)
}
@ -262,7 +262,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
*
* @since 1.6.0
*/
def json(paths: String*): DataFrame = format("json").load(paths: _*)
def json(paths: String*): DataFrame = format("json").load(paths : _*)
/**
* Loads an `JavaRDD[String]` storing JSON objects (one object per record) and
@ -355,7 +355,7 @@ class DataFrameReader private[sql](sqlContext: SQLContext) extends Logging {
* @since 1.6.0
*/
@scala.annotation.varargs
def text(paths: String*): DataFrame = format("text").load(paths: _*)
def text(paths: String*): DataFrame = format("text").load(paths : _*)
///////////////////////////////////////////////////////////////////////////////////////
// Builder pattern config options


@ -131,7 +131,7 @@ class Dataset[T] private[sql](
* along with `alias` or `as` to rearrange or rename as required.
* @since 1.6.0
*/
def as[U: Encoder]: Dataset[U] = {
def as[U : Encoder]: Dataset[U] = {
new Dataset(sqlContext, queryExecution, encoderFor[U])
}
@ -318,7 +318,7 @@ class Dataset[T] private[sql](
* Returns a new [[Dataset]] that contains the result of applying `func` to each element.
* @since 1.6.0
*/
def map[U: Encoder](func: T => U): Dataset[U] = mapPartitions(_.map(func))
def map[U : Encoder](func: T => U): Dataset[U] = mapPartitions(_.map(func))
/**
* (Java-specific)
@ -333,7 +333,7 @@ class Dataset[T] private[sql](
* Returns a new [[Dataset]] that contains the result of applying `func` to each partition.
* @since 1.6.0
*/
def mapPartitions[U: Encoder](func: Iterator[T] => Iterator[U]): Dataset[U] = {
def mapPartitions[U : Encoder](func: Iterator[T] => Iterator[U]): Dataset[U] = {
new Dataset[U](
sqlContext,
MapPartitions[T, U](
@ -360,7 +360,7 @@ class Dataset[T] private[sql](
* and then flattening the results.
* @since 1.6.0
*/
def flatMap[U: Encoder](func: T => TraversableOnce[U]): Dataset[U] =
def flatMap[U : Encoder](func: T => TraversableOnce[U]): Dataset[U] =
mapPartitions(_.flatMap(func))
/**
@ -432,7 +432,7 @@ class Dataset[T] private[sql](
* Returns a [[GroupedDataset]] where the data is grouped by the given key `func`.
* @since 1.6.0
*/
def groupBy[K: Encoder](func: T => K): GroupedDataset[K, T] = {
def groupBy[K : Encoder](func: T => K): GroupedDataset[K, T] = {
val inputPlan = logicalPlan
val withGroupingKey = AppendColumns(func, resolvedTEncoder, inputPlan)
val executed = sqlContext.executePlan(withGroupingKey)
@ -566,14 +566,14 @@ class Dataset[T] private[sql](
* Returns a new [[Dataset]] by sampling a fraction of records.
* @since 1.6.0
*/
def sample(withReplacement: Boolean, fraction: Double, seed: Long): Dataset[T] =
def sample(withReplacement: Boolean, fraction: Double, seed: Long) : Dataset[T] =
withPlan(Sample(0.0, fraction, withReplacement, seed, _))
/**
* Returns a new [[Dataset]] by sampling a fraction of records, using a random seed.
* @since 1.6.0
*/
def sample(withReplacement: Boolean, fraction: Double): Dataset[T] = {
def sample(withReplacement: Boolean, fraction: Double) : Dataset[T] = {
sample(withReplacement, fraction, Utils.random.nextLong)
}
@ -731,7 +731,7 @@ class Dataset[T] private[sql](
* a very large `num` can crash the driver process with OutOfMemoryError.
* @since 1.6.0
*/
def takeAsList(num: Int): java.util.List[T] = java.util.Arrays.asList(take(num): _*)
def takeAsList(num: Int): java.util.List[T] = java.util.Arrays.asList(take(num) : _*)
/**
* Persist this [[Dataset]] with the default storage level (`MEMORY_AND_DISK`).
@ -786,7 +786,7 @@ class Dataset[T] private[sql](
private[sql] def withPlan(f: LogicalPlan => LogicalPlan): Dataset[T] =
new Dataset[T](sqlContext, sqlContext.executePlan(f(logicalPlan)), tEncoder)
private[sql] def withPlan[R: Encoder](
private[sql] def withPlan[R : Encoder](
other: Dataset[_])(
f: (LogicalPlan, LogicalPlan) => LogicalPlan): Dataset[R] =
new Dataset[R](sqlContext, f(logicalPlan, other.logicalPlan))


@ -229,7 +229,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def mean(colNames: String*): DataFrame = {
aggregateNumericColumns(colNames: _*)(Average)
aggregateNumericColumns(colNames : _*)(Average)
}
/**
@ -241,7 +241,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def max(colNames: String*): DataFrame = {
aggregateNumericColumns(colNames: _*)(Max)
aggregateNumericColumns(colNames : _*)(Max)
}
/**
@ -253,7 +253,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def avg(colNames: String*): DataFrame = {
aggregateNumericColumns(colNames: _*)(Average)
aggregateNumericColumns(colNames : _*)(Average)
}
/**
@ -265,7 +265,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def min(colNames: String*): DataFrame = {
aggregateNumericColumns(colNames: _*)(Min)
aggregateNumericColumns(colNames : _*)(Min)
}
/**
@ -277,7 +277,7 @@ class GroupedData protected[sql](
*/
@scala.annotation.varargs
def sum(colNames: String*): DataFrame = {
aggregateNumericColumns(colNames: _*)(Sum)
aggregateNumericColumns(colNames : _*)(Sum)
}
/**


@ -73,7 +73,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
def keyAs[L: Encoder]: GroupedDataset[L, V] =
def keyAs[L : Encoder]: GroupedDataset[L, V] =
new GroupedDataset(
encoderFor[L],
unresolvedVEncoder,
@ -110,7 +110,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
def flatMapGroups[U: Encoder](f: (K, Iterator[V]) => TraversableOnce[U]): Dataset[U] = {
def flatMapGroups[U : Encoder](f: (K, Iterator[V]) => TraversableOnce[U]): Dataset[U] = {
new Dataset[U](
sqlContext,
MapGroups(
@ -158,7 +158,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
def mapGroups[U: Encoder](f: (K, Iterator[V]) => U): Dataset[U] = {
def mapGroups[U : Encoder](f: (K, Iterator[V]) => U): Dataset[U] = {
val func = (key: K, it: Iterator[V]) => Iterator(f(key, it))
flatMapGroups(func)
}
@ -302,7 +302,7 @@ class GroupedDataset[K, V] private[sql](
*
* @since 1.6.0
*/
def cogroup[U, R: Encoder](
def cogroup[U, R : Encoder](
other: GroupedDataset[K, U])(
f: (K, Iterator[V], Iterator[U]) => TraversableOnce[R]): Dataset[R] = {
new Dataset[R](


@ -409,7 +409,7 @@ class SQLContext private[sql](
* @since 1.3.0
*/
@Experimental
def createDataFrame[A <: Product: TypeTag](rdd: RDD[A]): DataFrame = {
def createDataFrame[A <: Product : TypeTag](rdd: RDD[A]): DataFrame = {
SQLContext.setActive(self)
val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
val attributeSeq = schema.toAttributes
@ -425,7 +425,7 @@ class SQLContext private[sql](
* @since 1.3.0
*/
@Experimental
def createDataFrame[A <: Product: TypeTag](data: Seq[A]): DataFrame = {
def createDataFrame[A <: Product : TypeTag](data: Seq[A]): DataFrame = {
SQLContext.setActive(self)
val schema = ScalaReflection.schemaFor[A].dataType.asInstanceOf[StructType]
val attributeSeq = schema.toAttributes
@ -498,7 +498,7 @@ class SQLContext private[sql](
}
def createDataset[T: Encoder](data: Seq[T]): Dataset[T] = {
def createDataset[T : Encoder](data: Seq[T]): Dataset[T] = {
val enc = encoderFor[T]
val attributes = enc.schema.toAttributes
val encoded = data.map(d => enc.toRow(d).copy())
@ -507,7 +507,7 @@ class SQLContext private[sql](
new Dataset[T](this, plan)
}
def createDataset[T: Encoder](data: RDD[T]): Dataset[T] = {
def createDataset[T : Encoder](data: RDD[T]): Dataset[T] = {
val enc = encoderFor[T]
val attributes = enc.schema.toAttributes
val encoded = data.map(d => enc.toRow(d))
@ -516,7 +516,7 @@ class SQLContext private[sql](
new Dataset[T](this, plan)
}
def createDataset[T: Encoder](data: java.util.List[T]): Dataset[T] = {
def createDataset[T : Encoder](data: java.util.List[T]): Dataset[T] = {
createDataset(data.asScala)
}
@ -945,7 +945,7 @@ class SQLContext private[sql](
}
}
// Register a successfully instantiated context to the singleton. This should be at the end of
// Register a succesfully instantiatd context to the singleton. This should be at the end of
// the class definition so that the singleton is updated only if there is no exception in the
// construction of the instance.
sparkContext.addSparkListener(new SparkListener {


@ -37,7 +37,7 @@ abstract class SQLImplicits {
protected def _sqlContext: SQLContext
/** @since 1.6.0 */
implicit def newProductEncoder[T <: Product: TypeTag]: Encoder[T] = ExpressionEncoder()
implicit def newProductEncoder[T <: Product : TypeTag]: Encoder[T] = ExpressionEncoder()
/** @since 1.6.0 */
implicit def newIntEncoder: Encoder[Int] = ExpressionEncoder()
@ -67,7 +67,7 @@ abstract class SQLImplicits {
* Creates a [[Dataset]] from an RDD.
* @since 1.6.0
*/
implicit def rddToDatasetHolder[T: Encoder](rdd: RDD[T]): DatasetHolder[T] = {
implicit def rddToDatasetHolder[T : Encoder](rdd: RDD[T]): DatasetHolder[T] = {
DatasetHolder(_sqlContext.createDataset(rdd))
}
@ -75,7 +75,7 @@ abstract class SQLImplicits {
* Creates a [[Dataset]] from a local Seq.
* @since 1.6.0
*/
implicit def localSeqToDatasetHolder[T: Encoder](s: Seq[T]): DatasetHolder[T] = {
implicit def localSeqToDatasetHolder[T : Encoder](s: Seq[T]): DatasetHolder[T] = {
DatasetHolder(_sqlContext.createDataset(s))
}
@ -89,7 +89,7 @@ abstract class SQLImplicits {
* Creates a DataFrame from an RDD of Product (e.g. case classes, tuples).
* @since 1.3.0
*/
implicit def rddToDataFrameHolder[A <: Product: TypeTag](rdd: RDD[A]): DataFrameHolder = {
implicit def rddToDataFrameHolder[A <: Product : TypeTag](rdd: RDD[A]): DataFrameHolder = {
DataFrameHolder(_sqlContext.createDataFrame(rdd))
}
@ -97,7 +97,7 @@ abstract class SQLImplicits {
* Creates a DataFrame from a local Seq of Product.
* @since 1.3.0
*/
implicit def localSeqToDataFrameHolder[A <: Product: TypeTag](data: Seq[A]): DataFrameHolder =
implicit def localSeqToDataFrameHolder[A <: Product : TypeTag](data: Seq[A]): DataFrameHolder =
{
DataFrameHolder(_sqlContext.createDataFrame(data))
}


@ -39,7 +39,7 @@ private[r] object SQLUtils {
new JavaSparkContext(sqlCtx.sparkContext)
}
def createStructType(fields: Seq[StructField]): StructType = {
def createStructType(fields : Seq[StructField]): StructType = {
StructType(fields)
}


@ -223,7 +223,7 @@ case class Exchange(
new ShuffledRowRDD(shuffleDependency, specifiedPartitionStartIndices)
}
protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
protected override def doExecute(): RDD[InternalRow] = attachTree(this , "execute") {
coordinator match {
case Some(exchangeCoordinator) =>
val shuffleRDD = exchangeCoordinator.postShuffleRDD(this)


@ -71,7 +71,7 @@ private[sql] trait Queryable {
private[sql] def formatString (
rows: Seq[Seq[String]],
numRows: Int,
hasMoreData: Boolean,
hasMoreData : Boolean,
truncate: Boolean = true): String = {
val sb = new StringBuilder
val numCols = schema.fieldNames.length


@ -29,7 +29,7 @@ import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.types._
object TypedAggregateExpression {
def apply[A, B: Encoder, C: Encoder](
def apply[A, B : Encoder, C : Encoder](
aggregator: Aggregator[A, B, C]): TypedAggregateExpression = {
new TypedAggregateExpression(
aggregator.asInstanceOf[Aggregator[Any, Any, Any]],


@ -256,7 +256,7 @@ private[spark] class SqlNewHadoopRDD[V: ClassTag](
val infos = c.newGetLocationInfo.invoke(split).asInstanceOf[Array[AnyRef]]
Some(HadoopRDD.convertSplitLocationInfo(infos))
} catch {
case e: Exception =>
case e : Exception =>
logDebug("Failed to use InputSplit#getLocationInfo.", e)
None
}


@ -557,7 +557,7 @@ private[parquet] object CatalystSchemaConverter {
}
}
private def computeMinBytesForPrecision(precision: Int): Int = {
private def computeMinBytesForPrecision(precision : Int) : Int = {
var numBytes = 1
while (math.pow(2.0, 8 * numBytes - 1) < math.pow(10.0, precision)) {
numBytes += 1


@ -34,7 +34,7 @@ import org.apache.spark.util.collection.unsafe.sort.UnsafeExternalSorter
* materialize the right RDD (in case of the right RDD is nondeterministic).
*/
private[spark]
class UnsafeCartesianRDD(left: RDD[UnsafeRow], right: RDD[UnsafeRow], numFieldsOfRight: Int)
class UnsafeCartesianRDD(left : RDD[UnsafeRow], right : RDD[UnsafeRow], numFieldsOfRight: Int)
extends CartesianRDD[UnsafeRow, UnsafeRow](left.sparkContext, left, right) {
override def compute(split: Partition, context: TaskContext): Iterator[(UnsafeRow, UnsafeRow)] = {


@ -64,7 +64,7 @@ private[sql] trait SQLMetricValue[T] extends Serializable {
/**
* A wrapper of Long to avoid boxing and unboxing when using Accumulator
*/
private[sql] class LongSQLMetricValue(private var _value: Long) extends SQLMetricValue[Long] {
private[sql] class LongSQLMetricValue(private var _value : Long) extends SQLMetricValue[Long] {
def add(incr: Long): LongSQLMetricValue = {
_value += incr


@ -94,7 +94,7 @@ private[sql] object FrequentItems extends Logging {
(name, originalSchema.fields(index).dataType)
}.toArray
val freqItems = df.select(cols.map(Column(_)): _*).rdd.aggregate(countMaps)(
val freqItems = df.select(cols.map(Column(_)) : _*).rdd.aggregate(countMaps)(
seqOp = (counts, row) => {
var i = 0
while (i < numCols) {
@ -115,7 +115,7 @@ private[sql] object FrequentItems extends Logging {
}
)
val justItems = freqItems.map(m => m.baseMap.keys.toArray)
val resultRow = Row(justItems: _*)
val resultRow = Row(justItems : _*)
// append frequent Items to the column name for easy debugging
val outputCols = colInfo.map { v =>
StructField(v._1 + "_freqItems", ArrayType(v._2, false))


@ -44,7 +44,7 @@ object Window {
*/
@scala.annotation.varargs
def partitionBy(colName: String, colNames: String*): WindowSpec = {
spec.partitionBy(colName, colNames: _*)
spec.partitionBy(colName, colNames : _*)
}
/**
@ -53,7 +53,7 @@ object Window {
*/
@scala.annotation.varargs
def partitionBy(cols: Column*): WindowSpec = {
spec.partitionBy(cols: _*)
spec.partitionBy(cols : _*)
}
/**
@ -62,7 +62,7 @@ object Window {
*/
@scala.annotation.varargs
def orderBy(colName: String, colNames: String*): WindowSpec = {
spec.orderBy(colName, colNames: _*)
spec.orderBy(colName, colNames : _*)
}
/**
@ -71,7 +71,7 @@ object Window {
*/
@scala.annotation.varargs
def orderBy(cols: Column*): WindowSpec = {
spec.orderBy(cols: _*)
spec.orderBy(cols : _*)
}
private def spec: WindowSpec = {


@ -306,7 +306,7 @@ object functions extends LegacyFunctions {
*/
@scala.annotation.varargs
def countDistinct(columnName: String, columnNames: String*): Column =
countDistinct(Column(columnName), columnNames.map(Column.apply): _*)
countDistinct(Column(columnName), columnNames.map(Column.apply) : _*)
/**
* Aggregate function: returns the first value in a group.
@ -768,7 +768,7 @@ object functions extends LegacyFunctions {
*/
@scala.annotation.varargs
def array(colName: String, colNames: String*): Column = {
array((colName +: colNames).map(col): _*)
array((colName +: colNames).map(col) : _*)
}
/**
@ -977,7 +977,7 @@ object functions extends LegacyFunctions {
*/
@scala.annotation.varargs
def struct(colName: String, colNames: String*): Column = {
struct((colName +: colNames).map(col): _*)
struct((colName +: colNames).map(col) : _*)
}
/**


@ -30,7 +30,7 @@ private class AggregatedDialect(dialects: List[JdbcDialect]) extends JdbcDialect
require(dialects.nonEmpty)
override def canHandle(url: String): Boolean =
override def canHandle(url : String): Boolean =
dialects.map(_.canHandle(url)).reduce(_ && _)
override def getCatalystType(


@ -31,7 +31,7 @@ import org.apache.spark.sql.types._
* send a null value to the database.
*/
@DeveloperApi
case class JdbcType(databaseTypeDefinition: String, jdbcNullType: Int)
case class JdbcType(databaseTypeDefinition : String, jdbcNullType : Int)
/**
* :: DeveloperApi ::
@ -60,7 +60,7 @@ abstract class JdbcDialect extends Serializable {
* @return True if the dialect can be applied on the given jdbc url.
* @throws NullPointerException if the url is null.
*/
def canHandle(url: String): Boolean
def canHandle(url : String): Boolean
/**
* Get the custom datatype mapping for the given jdbc meta information.
@ -130,7 +130,7 @@ object JdbcDialects {
*
* @param dialect The new dialect.
*/
def registerDialect(dialect: JdbcDialect): Unit = {
def registerDialect(dialect: JdbcDialect) : Unit = {
dialects = dialect :: dialects.filterNot(_ == dialect)
}
@ -139,7 +139,7 @@ object JdbcDialects {
*
* @param dialect The jdbc dialect.
*/
def unregisterDialect(dialect: JdbcDialect): Unit = {
def unregisterDialect(dialect : JdbcDialect) : Unit = {
dialects = dialects.filterNot(_ == dialect)
}
@ -169,5 +169,5 @@ object JdbcDialects {
* NOOP dialect object, always returning the neutral element.
*/
private object NoopDialect extends JdbcDialect {
override def canHandle(url: String): Boolean = true
override def canHandle(url : String): Boolean = true
}


@ -23,13 +23,10 @@ import org.apache.spark.sql.types.{BooleanType, DataType, LongType, MetadataBuil
private case object MySQLDialect extends JdbcDialect {
override def canHandle(url: String): Boolean = url.startsWith("jdbc:mysql")
override def canHandle(url : String): Boolean = url.startsWith("jdbc:mysql")
override def getCatalystType(
sqlType: Int,
typeName: String,
size: Int,
md: MetadataBuilder): Option[DataType] = {
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] = {
if (sqlType == Types.VARBINARY && typeName.equals("BIT") && size != 1) {
// This could instead be a BinaryType if we'd rather return bit-vectors of up to 64 bits as
// byte arrays instead of longs.


@ -24,7 +24,7 @@ import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSQLContext
/** An `Aggregator` that adds up any numeric type returned by the given function. */
class SumOf[I, N: Numeric](f: I => N) extends Aggregator[I, N, N] {
class SumOf[I, N : Numeric](f: I => N) extends Aggregator[I, N, N] {
val numeric = implicitly[Numeric[N]]
override def zero: N = numeric.zero
@ -113,7 +113,7 @@ class DatasetAggregatorSuite extends QueryTest with SharedSQLContext {
import testImplicits._
def sum[I, N: Numeric: Encoder](f: I => N): TypedColumn[I, N] =
def sum[I, N : Numeric : Encoder](f: I => N): TypedColumn[I, N] =
new SumOf(f).toColumn
test("typed aggregation: TypedAggregator") {


@ -27,7 +27,7 @@ class DatasetCacheSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("persist and unpersist") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS().select(expr("_2 + 1").as[Int])
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS().select(expr("_2 + 1").as[Int])
val cached = ds.cache()
// count triggers the caching action. It should not throw.
cached.count()


@ -30,7 +30,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
import testImplicits._
test("toDS") {
val data = Seq(("a", 1), ("b", 2), ("c", 3))
val data = Seq(("a", 1) , ("b", 2), ("c", 3))
checkAnswer(
data.toDS(),
data: _*)
@ -87,7 +87,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("as case class / collect") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDF("a", "b").as[ClassData]
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDF("a", "b").as[ClassData]
checkAnswer(
ds,
ClassData("a", 1), ClassData("b", 2), ClassData("c", 3))
@ -105,7 +105,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("map") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.map(v => (v._1, v._2 + 1)),
("a", 2), ("b", 3), ("c", 4))
@ -124,23 +124,23 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(expr("_2 + 1").as[Int]),
2, 3, 4)
}
test("select 2") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
expr("_2").as[Int]): Dataset[(String, Int)],
expr("_2").as[Int]) : Dataset[(String, Int)],
("a", 1), ("b", 2), ("c", 3))
}
test("select 2, primitive and tuple") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
@ -149,7 +149,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select 2, primitive and class") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.select(
expr("_1").as[String],
@ -158,7 +158,7 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("select 2, primitive and class, fields reordered") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkDecoding(
ds.select(
expr("_1").as[String],
@ -167,28 +167,28 @@ class DatasetSuite extends QueryTest with SharedSQLContext {
}
test("filter") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
checkAnswer(
ds.filter(_._1 == "b"),
("b", 2))
}
test("foreach") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.accumulator(0)
ds.foreach(v => acc += v._2)
assert(acc.value == 6)
}
test("foreachPartition") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
val acc = sparkContext.accumulator(0)
ds.foreachPartition(_.foreach(v => acc += v._2))
assert(acc.value == 6)
}
test("reduce") {
val ds = Seq(("a", 1), ("b", 2), ("c", 3)).toDS()
val ds = Seq(("a", 1) , ("b", 2), ("c", 3)).toDS()
assert(ds.reduce((a, b) => ("sum", a._2 + b._2)) == ("sum", 6))
}


@ -206,7 +206,7 @@ class JsonSuite extends QueryTest with SharedSQLContext with TestJsonData {
StructType(
StructField("f1", IntegerType, true) ::
StructField("f2", IntegerType, true) :: Nil),
StructType(StructField("f1", LongType, true) :: Nil),
StructType(StructField("f1", LongType, true) :: Nil) ,
StructType(
StructField("f1", LongType, true) ::
StructField("f2", IntegerType, true) :: Nil))


@ -72,7 +72,7 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext {
/**
* Writes `data` to a Parquet file, reads it back and check file contents.
*/
protected def checkParquetFile[T <: Product: ClassTag: TypeTag](data: Seq[T]): Unit = {
protected def checkParquetFile[T <: Product : ClassTag: TypeTag](data: Seq[T]): Unit = {
withParquetDataFrame(data)(r => checkAnswer(r, data.map(Row.fromTuple)))
}


@ -46,7 +46,7 @@ class JDBCSuite extends SparkFunSuite
val testBytes = Array[Byte](99.toByte, 134.toByte, 135.toByte, 200.toByte, 205.toByte)
val testH2Dialect = new JdbcDialect {
override def canHandle(url: String): Boolean = url.startsWith("jdbc:h2")
override def canHandle(url: String) : Boolean = url.startsWith("jdbc:h2")
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] =
Some(StringType)
@ -489,7 +489,7 @@ class JDBCSuite extends SparkFunSuite
test("Aggregated dialects") {
val agg = new AggregatedDialect(List(new JdbcDialect {
override def canHandle(url: String): Boolean = url.startsWith("jdbc:h2:")
override def canHandle(url: String) : Boolean = url.startsWith("jdbc:h2:")
override def getCatalystType(
sqlType: Int, typeName: String, size: Int, md: MetadataBuilder): Option[DataType] =
if (sqlType % 2 == 0) {


@ -18,7 +18,7 @@
package org.apache.spark.sql.hive.thriftserver
private[hive] object ReflectionUtils {
def setSuperField(obj: Object, fieldName: String, fieldValue: Object) {
def setSuperField(obj : Object, fieldName: String, fieldValue: Object) {
setAncestorField(obj, 1, fieldName, fieldValue)
}


@ -325,7 +325,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
if (ret != 0) {
// For analysis exception, only the error is printed out to the console.
rc.getException() match {
case e: AnalysisException =>
case e : AnalysisException =>
err.println(s"""Error in query: ${e.getMessage}""")
case _ => err.println(rc.getErrorMessage())
}
@ -369,7 +369,7 @@ private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
if (counter != 0) {
responseMsg += s", Fetched $counter row(s)"
}
console.printInfo(responseMsg, null)
console.printInfo(responseMsg , null)
// Destroy the driver to release all the locks.
driver.destroy()
} else {


@ -657,8 +657,8 @@ private[hive] trait HiveInspectors {
case DecimalType() => PrimitiveObjectInspectorFactory.javaHiveDecimalObjectInspector
case StructType(fields) =>
ObjectInspectorFactory.getStandardStructObjectInspector(
java.util.Arrays.asList(fields.map(f => f.name): _*),
java.util.Arrays.asList(fields.map(f => toInspector(f.dataType)): _*))
java.util.Arrays.asList(fields.map(f => f.name) : _*),
java.util.Arrays.asList(fields.map(f => toInspector(f.dataType)) : _*))
}
/**
@ -905,8 +905,8 @@ private[hive] trait HiveInspectors {
getListTypeInfo(elemType.toTypeInfo)
case StructType(fields) =>
getStructTypeInfo(
java.util.Arrays.asList(fields.map(_.name): _*),
java.util.Arrays.asList(fields.map(_.dataType.toTypeInfo): _*))
java.util.Arrays.asList(fields.map(_.name) : _*),
java.util.Arrays.asList(fields.map(_.dataType.toTypeInfo) : _*))
case MapType(keyType, valueType, _) =>
getMapTypeInfo(keyType.toTypeInfo, valueType.toTypeInfo)
case BinaryType => binaryTypeInfo


@ -181,7 +181,7 @@ private[hive] case class HiveSimpleUDF(
val ret = FunctionRegistry.invoke(
method,
function,
conversionHelper.convertIfNecessary(inputs: _*): _*)
conversionHelper.convertIfNecessary(inputs : _*): _*)
unwrap(ret, returnInspector)
}


@ -118,8 +118,8 @@ class HiveInspectorSuite extends SparkFunSuite with HiveInspectors {
case DecimalType() => PrimitiveObjectInspectorFactory.writableHiveDecimalObjectInspector
case StructType(fields) =>
ObjectInspectorFactory.getStandardStructObjectInspector(
java.util.Arrays.asList(fields.map(f => f.name): _*),
java.util.Arrays.asList(fields.map(f => toWritableInspector(f.dataType)): _*))
java.util.Arrays.asList(fields.map(f => f.name) : _*),
java.util.Arrays.asList(fields.map(f => toWritableInspector(f.dataType)) : _*))
}
def checkDataType(dt1: Seq[DataType], dt2: Seq[DataType]): Unit = {


@ -154,8 +154,8 @@ class InsertIntoHiveTableSuite extends QueryTest with TestHiveSingleton with Bef
}
val expected = List(
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=2"::Nil,
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=3"::Nil,
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=1"::Nil,
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=3"::Nil ,
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=1"::Nil ,
"p1=a"::"p2=b"::"p3=c"::"p4=c"::"p5=4"::Nil
)
assert(listFolders(tmpDir, List()).sortBy(_.toString()) === expected.sortBy(_.toString))