[SQL] When creating a partitioned table scan, explicitly create a UnionRDD.
Otherwise, it will cause a stack overflow when there are many partitions.
Author: Yin Huai <yhuai@databricks.com>
Closes #6162 from yhuai/partitionUnionedRDD and squashes the following commits:
fa016d8 [Yin Huai] Explicitly create UnionRDD.
(cherry picked from commit e8f0e016ea)
Signed-off-by: Cheng Lian <lian@databricks.com>
This commit is contained in:
parent
bac45229aa
commit
7aa269f4bb
|
@@ -21,7 +21,7 @@ import org.apache.hadoop.fs.Path
|
|||
|
||||
import org.apache.spark.Logging
|
||||
import org.apache.spark.deploy.SparkHadoopUtil
|
||||
import org.apache.spark.rdd.RDD
|
||||
import org.apache.spark.rdd.{UnionRDD, RDD}
|
||||
import org.apache.spark.sql.Row
|
||||
import org.apache.spark.sql.catalyst.expressions
|
||||
import org.apache.spark.sql.catalyst.expressions._
|
||||
|
@@ -169,8 +169,11 @@ private[sql] object DataSourceStrategy extends Strategy with Logging {
|
|||
scan.execute()
|
||||
}
|
||||
|
||||
val unionedRows = perPartitionRows.reduceOption(_ ++ _).getOrElse {
|
||||
val unionedRows =
|
||||
if (perPartitionRows.length == 0) {
|
||||
relation.sqlContext.emptyResult
|
||||
} else {
|
||||
new UnionRDD(relation.sqlContext.sparkContext, perPartitionRows)
|
||||
}
|
||||
|
||||
createPhysicalRDD(logicalRelation.relation, output, unionedRows)
|
||||
|
|
Loading…
Reference in a new issue