[SPARK-31563][SQL] Fix failure of InSet.sql for collections of Catalyst's internal types

### What changes were proposed in this pull request?
In the PR, I propose to fix the `InSet.sql` method for the cases when the input collection contains values of Catalyst's internal types, for instance `UTF8String`. Elements of the input set `hset` are converted to their external Scala types and wrapped in `Literal` to properly form the SQL representation of the input collection.
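As a rough, standalone sketch of that conversion for a single element (not part of the patch; it assumes a `StringType` child and reuses the same `convertToScala` helper that the patch imports):

```scala
import org.apache.spark.sql.catalyst.CatalystTypeConverters.convertToScala
import org.apache.spark.sql.catalyst.expressions.Literal
import org.apache.spark.sql.types.StringType
import org.apache.spark.unsafe.types.UTF8String

// An element of `hset` as Catalyst stores it internally
val internalValue = UTF8String.fromString("a")

// Convert back to the external Scala type (String) before building a Literal,
// so that Literal.sql can render a valid SQL string literal
val sql = Literal(convertToScala(internalValue, StringType)).sql
// sql == "'a'"
```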

### Why are the changes needed?
The changes fix a bug in `InSet.sql` that made a wrong assumption about the types of the collection elements. See SPARK-31563 for more details.
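For context, a minimal reproduction in the spirit of the added test (the exact exception raised by the old code is an assumption here; see SPARK-31563 for the reported failure):

```scala
import org.apache.spark.sql.catalyst.expressions.{InSet, Literal}
import org.apache.spark.unsafe.types.UTF8String

// hset holds Catalyst-internal UTF8String values, as the optimizer produces them
val inSet = InSet(Literal("a"), Set("a", "b").map(UTF8String.fromString))

// Before this patch, Literal(_) was applied to the UTF8String elements directly
// and InSet.sql failed; with the fix it returns "('a' IN ('a', 'b'))"
inSet.sql
```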

### Does this PR introduce any user-facing change?
Most likely not.

### How was this patch tested?
Added a test to `ColumnExpressionSuite`.

Closes #28343 from MaxGekk/fix-InSet-sql.

Authored-by: Max Gekk <max.gekk@gmail.com>
Signed-off-by: Dongjoon Hyun <dongjoon@apache.org>
2 changed files with 11 additions and 2 deletions

@@ -19,6 +19,7 @@ package org.apache.spark.sql.catalyst.expressions

 import scala.collection.immutable.TreeSet

+import org.apache.spark.sql.catalyst.CatalystTypeConverters.convertToScala
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
 import org.apache.spark.sql.catalyst.expressions.BindReferences.bindReference
@@ -519,7 +520,9 @@ case class InSet(child: Expression, hset: Set[Any]) extends UnaryExpression with

   override def sql: String = {
     val valueSQL = child.sql
-    val listSQL = hset.toSeq.map(Literal(_).sql).mkString(", ")
+    val listSQL = hset.toSeq
+      .map(elem => Literal(convertToScala(elem, child.dataType)).sql)
+      .mkString(", ")
     s"($valueSQL IN ($listSQL))"
   }
 }

@@ -26,12 +26,13 @@ import org.apache.hadoop.io.{LongWritable, Text}
 import org.apache.hadoop.mapreduce.lib.input.{TextInputFormat => NewTextInputFormat}
 import org.scalatest.Matchers._

-import org.apache.spark.sql.catalyst.expressions.{In, InSet, NamedExpression}
+import org.apache.spark.sql.catalyst.expressions.{In, InSet, Literal, NamedExpression}
 import org.apache.spark.sql.execution.ProjectExec
 import org.apache.spark.sql.functions._
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SharedSparkSession
 import org.apache.spark.sql.types._
+import org.apache.spark.unsafe.types.UTF8String

 class ColumnExpressionSuite extends QueryTest with SharedSparkSession {
   import testImplicits._
@@ -869,4 +870,9 @@ class ColumnExpressionSuite extends QueryTest with SharedSparkSession {
       df.select(typedLit(("a", 2, 1.0))),
       Row(Row("a", 2, 1.0)) :: Nil)
   }
+
+  test("SPARK-31563: sql of InSet for UTF8String collection") {
+    val inSet = InSet(Literal("a"), Set("a", "b").map(UTF8String.fromString))
+    assert(inSet.sql === "('a' IN ('a', 'b'))")
+  }
 }