[SPARK-2890][SQL] Allow reading of data when case insensitive resolution could cause possible ambiguity.

Throwing an error in the constructor makes it impossible to run queries, even when there is no actual ambiguity.  Remove this check in favor of throwing an error in analysis when the query is actually ambiguous.

Also took the opportunity to add test cases that would have caught a subtle bug in my first attempt at fixing this, and to refactor some other test code.

Author: Michael Armbrust <michael@databricks.com>

Closes #2209 from marmbrus/sameNameStruct and squashes the following commits:

729cca4 [Michael Armbrust] Better tests.
a003aeb [Michael Armbrust] Remove error (it'll be caught in analysis).
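
A minimal sketch (editor's illustration, not part of the commit) of the behavior this change targets, assuming the catalyst types package of this Spark era:

    import org.apache.spark.sql.catalyst.types.{IntegerType, StructField, StructType}

    // Two fields whose names collide, e.g. as produced when case-insensitive
    // resolution normalizes a source schema containing both `a` and `A`.
    // Before: the constructor's require(validateFields(fields), ...) failed with
    // "Found fields with the same name.", blocking every query over the data,
    // including queries that never touch the duplicated field.
    // After: construction succeeds; only a query that actually makes an
    // ambiguous reference fails, and it fails during analysis.
    val schema = StructType(Seq(
      StructField("a", IntegerType, nullable = true),
      StructField("a", IntegerType, nullable = true)))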
commit 30f288ae34 (parent 7583699873)
Michael Armbrust, 2014-09-16 11:42:26 -07:00
2 changed files with 45 additions and 28 deletions


@@ -308,13 +308,9 @@ case class StructField(name: String, dataType: DataType, nullable: Boolean) {
 object StructType {
   protected[sql] def fromAttributes(attributes: Seq[Attribute]): StructType =
     StructType(attributes.map(a => StructField(a.name, a.dataType, a.nullable)))
-
-  private def validateFields(fields: Seq[StructField]): Boolean =
-    fields.map(field => field.name).distinct.size == fields.size
 }

 case class StructType(fields: Seq[StructField]) extends DataType {
-  require(StructType.validateFields(fields), "Found fields with the same name.")

   /**
    * Returns all field names in a [[Seq]].
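
To show where the error now surfaces, a hedged follow-on to the sketch above (API names as of Spark 1.1; treat the details as assumptions, they are not shown in this diff):

    import org.apache.spark.sql.Row
    import org.apache.spark.sql.hive.test.TestHive._

    // Back a temporary table with the duplicate-name `schema` from the sketch
    // above; this is now legal because the constructor check is gone.
    val rows = sparkContext.parallelize(Seq(Row(1, 2)))
    applySchema(rows, schema).registerTempTable("dupes")

    sql("SELECT 1 FROM dupes").collect()  // fine: nothing ambiguous is referenced
    sql("SELECT a FROM dupes").collect()  // fails during analysis: ambiguous reference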


@@ -17,47 +17,68 @@

 package org.apache.spark.sql.hive.execution

-import org.apache.spark.sql.hive.test.TestHive
-import org.apache.hadoop.conf.Configuration
-import org.apache.spark.SparkContext._
-import java.util
-import org.apache.hadoop.fs.{FileSystem, Path}
-import org.apache.hadoop.hive.serde2.{SerDeStats, AbstractSerDe}
-import org.apache.hadoop.io.{NullWritable, Writable}
-import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorFactory, ObjectInspector}
-import java.util.Properties
-import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
-import scala.collection.JavaConversions._
 import java.io.{DataOutput, DataInput}
+import java.util
+import java.util.Properties
+
+import org.apache.spark.util.Utils
+
+import scala.collection.JavaConversions._
+
+import org.apache.hadoop.conf.Configuration
+import org.apache.hadoop.hive.serde2.{SerDeStats, AbstractSerDe}
+import org.apache.hadoop.io.Writable
+import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorFactory, ObjectInspector}
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.hive.test.TestHive
+import org.apache.spark.sql.hive.test.TestHive._
+
+case class Fields(f1: Int, f2: Int, f3: Int, f4: Int, f5: Int)

 /**
  * A test suite for Hive custom UDFs.
  */
 class HiveUdfSuite extends HiveComparisonTest {
-  TestHive.sql(
-    """
+
+  test("spark sql udf test that returns a struct") {
+    registerFunction("getStruct", (_: Int) => Fields(1, 2, 3, 4, 5))
+    assert(sql(
+      """
+        |SELECT getStruct(1).f1,
+        |       getStruct(1).f2,
+        |       getStruct(1).f3,
+        |       getStruct(1).f4,
+        |       getStruct(1).f5 FROM src LIMIT 1
+      """.stripMargin).first() === Row(1, 2, 3, 4, 5))
+  }
+
+  test("hive struct udf") {
+    sql(
+      """
       |CREATE EXTERNAL TABLE hiveUdfTestTable (
       |   pair STRUCT<id: INT, value: INT>
       |)
       |PARTITIONED BY (partition STRING)
       |ROW FORMAT SERDE '%s'
       |STORED AS SEQUENCEFILE
-    """.stripMargin.format(classOf[PairSerDe].getName)
-  )
-
-  TestHive.sql(
-    "ALTER TABLE hiveUdfTestTable ADD IF NOT EXISTS PARTITION(partition='testUdf') LOCATION '%s'"
-      .format(this.getClass.getClassLoader.getResource("data/files/testUdf").getFile)
-  )
-
-  TestHive.sql("CREATE TEMPORARY FUNCTION testUdf AS '%s'".format(classOf[PairUdf].getName))
-
-  TestHive.sql("SELECT testUdf(pair) FROM hiveUdfTestTable")
-
-  TestHive.sql("DROP TEMPORARY FUNCTION IF EXISTS testUdf")
+    """.
+        stripMargin.format(classOf[PairSerDe].getName))
+
+    val location = Utils.getSparkClassLoader.getResource("data/files/testUdf").getFile
+    sql(s"""
+      ALTER TABLE hiveUdfTestTable
+      ADD IF NOT EXISTS PARTITION(partition='testUdf')
+      LOCATION '$location'""")
+
+    sql(s"CREATE TEMPORARY FUNCTION testUdf AS '${classOf[PairUdf].getName}'")
+    sql("SELECT testUdf(pair) FROM hiveUdfTestTable")
+    sql("DROP TEMPORARY FUNCTION IF EXISTS testUdf")
+  }
 }

 class TestPair(x: Int, y: Int) extends Writable with Serializable {