[SPARK-5472][SQL] Fix Scala code style

Fix Scala code style.

Author: Hung Lin <hung@zoomdata.com>

Closes #4464 from hunglin/SPARK-5472 and squashes the following commits:

ef7a3b3 [Hung Lin] SPARK-5472: fix scala style
Authored by Hung Lin on 2015-02-08 22:36:42 -08:00; committed by Reynold Xin
parent 4396dfb37f
commit 4575c5643a
2 changed files with 41 additions and 36 deletions
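
The bulk of the patch applies Spark's wrapping convention for long parameter and argument lists: break immediately after the opening parenthesis and give each item its own line (method parameters get four extra spaces of indentation, call arguments two, as in the hunks below). A minimal, hypothetical sketch of that convention, using made-up names rather than code from this patch:

    object WrapStyleSketch {
      // Declaration: break after "(" and indent each parameter four extra spaces.
      def formatRow(
          name: String,
          score: Double,
          rank: Int): String = {
        s"$name,$score,$rank"
      }

      def main(args: Array[String]): Unit = {
        // Call site: one argument per line, indented two extra spaces.
        val line = formatRow(
          "alice",
          0.97,
          1)
        println(line)  // prints alice,0.97,1
      }
    }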

sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRDD.scala

@@ -17,13 +17,10 @@
 package org.apache.spark.sql.jdbc
 
-import java.sql.{Connection, DatabaseMetaData, DriverManager, ResultSet, ResultSetMetaData, SQLException}
-
-import scala.collection.mutable.ArrayBuffer
+import java.sql.{Connection, DriverManager, ResultSet, ResultSetMetaData, SQLException}
 
 import org.apache.spark.{Logging, Partition, SparkContext, TaskContext}
 import org.apache.spark.rdd.RDD
 import org.apache.spark.util.NextIterator
-import org.apache.spark.sql.catalyst.analysis.HiveTypeCoercion
 import org.apache.spark.sql.catalyst.expressions.{Row, SpecificMutableRow}
 import org.apache.spark.sql.types._
 import org.apache.spark.sql.sources._
@@ -100,7 +97,7 @@ private[sql] object JDBCRDD extends Logging {
     try {
       val rsmd = rs.getMetaData
       val ncols = rsmd.getColumnCount
-      var fields = new Array[StructField](ncols);
+      val fields = new Array[StructField](ncols)
       var i = 0
       while (i < ncols) {
         val columnName = rsmd.getColumnName(i + 1)
@@ -176,7 +173,8 @@ private[sql] object JDBCRDD extends Logging {
    *
    * @return An RDD representing "SELECT requiredColumns FROM fqTable".
    */
-  def scanTable(sc: SparkContext,
+  def scanTable(
+      sc: SparkContext,
       schema: StructType,
       driver: String,
       url: String,
@@ -184,9 +182,12 @@ private[sql] object JDBCRDD extends Logging {
       requiredColumns: Array[String],
       filters: Array[Filter],
       parts: Array[Partition]): RDD[Row] = {
 
     val prunedSchema = pruneSchema(schema, requiredColumns)
-    return new JDBCRDD(sc,
+
+    return new
+      JDBCRDD(
+        sc,
         getConnector(driver, url),
         prunedSchema,
         fqTable,
@@ -412,6 +413,5 @@ private[sql] class JDBCRDD(
       gotNext = false
       nextValue
     }
-
   }
 }

sql/core/src/main/scala/org/apache/spark/sql/jdbc/JDBCRelation.scala

@@ -96,7 +96,8 @@ private[sql] class DefaultSource extends RelationProvider {
 
     if (driver != null) Class.forName(driver)
 
-    if ( partitionColumn != null
+    if (
+      partitionColumn != null
         && (lowerBound == null || upperBound == null || numPartitions == null)) {
       sys.error("Partitioning incompletely specified")
     }
@@ -104,8 +105,10 @@ private[sql] class DefaultSource extends RelationProvider {
     val partitionInfo = if (partitionColumn == null) {
       null
     } else {
-      JDBCPartitioningInfo(partitionColumn,
-        lowerBound.toLong, upperBound.toLong,
+      JDBCPartitioningInfo(
+        partitionColumn,
+        lowerBound.toLong,
+        upperBound.toLong,
         numPartitions.toInt)
     }
     val parts = JDBCRelation.columnPartition(partitionInfo)
@@ -113,21 +116,23 @@
   }
 }
 
-private[sql] case class JDBCRelation(url: String,
+private[sql] case class JDBCRelation(
+    url: String,
     table: String,
-    parts: Array[Partition])(
-    @transient val sqlContext: SQLContext)
-  extends PrunedFilteredScan {
+    parts: Array[Partition])(@transient val sqlContext: SQLContext) extends PrunedFilteredScan {
 
   override val schema = JDBCRDD.resolveTable(url, table)
 
   override def buildScan(requiredColumns: Array[String], filters: Array[Filter]) = {
     val driver: String = DriverManager.getDriver(url).getClass.getCanonicalName
-    JDBCRDD.scanTable(sqlContext.sparkContext,
+    JDBCRDD.scanTable(
+      sqlContext.sparkContext,
       schema,
-      driver, url,
+      driver,
+      url,
       table,
-      requiredColumns, filters,
+      requiredColumns,
+      filters,
       parts)
   }
 }
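
The remaining hunks are smaller scalastyle cleanups visible above: the import hunk in JDBCRDD.scala drops scala.collection.mutable.ArrayBuffer and HiveTypeCoercion and removes DatabaseMetaData from the java.sql import, presumably because they were unused, and the resolveTable hunk replaces a var with a val and drops a trailing semicolon. A hypothetical sketch of that last pair of rules, with made-up names rather than the patch's own code:

    // Before: var for a value that is never reassigned, plus a redundant semicolon.
    var columns = new Array[String](3);

    // After: val, and no semicolon.
    val columns = new Array[String](3)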