From 73412ffb3a857acda5dab41d7be3f7ae627f6eaf Mon Sep 17 00:00:00 2001
From: Dongjoon Hyun
Date: Sun, 6 Dec 2020 19:34:54 -0800
Subject: [PATCH] [SPARK-33680][SQL][TESTS] Fix
 PrunePartitionSuiteBase/BucketedReadWithHiveSupportSuite not to depend on
 the default conf

### What changes were proposed in this pull request?

This PR updates `PrunePartitionSuiteBase`/`BucketedReadWithHiveSupportSuite` to set the required conf explicitly.

### Why are the changes needed?

Unit tests should not depend on the default configurations.

### Does this PR introduce _any_ user-facing change?

No.

### How was this patch tested?

According to https://github.com/apache/spark/pull/30628 , these seem to be the only remaining ones.

Pass the CIs.

Closes #30631 from dongjoon-hyun/SPARK-CONF-AGNO.

Authored-by: Dongjoon Hyun
Signed-off-by: Dongjoon Hyun
---
 .../execution/PrunePartitionSuiteBase.scala   | 81 ++++++++++---------
 .../BucketedReadWithHiveSupportSuite.scala    |  4 +-
 2 files changed, 45 insertions(+), 40 deletions(-)

diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala
index 8e35cd0343..bc170fcd59 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/PrunePartitionSuiteBase.scala
@@ -21,6 +21,7 @@ import org.apache.spark.sql.QueryTest
 import org.apache.spark.sql.catalyst.expressions.{AttributeReference, BinaryOperator, Expression, IsNotNull, Literal}
 import org.apache.spark.sql.execution.{FileSourceScanExec, SparkPlan}
 import org.apache.spark.sql.hive.test.TestHiveSingleton
+import org.apache.spark.sql.internal.SQLConf.ADAPTIVE_EXECUTION_ENABLED
 import org.apache.spark.sql.test.SQLTestUtils
 
 abstract class PrunePartitionSuiteBase extends QueryTest with SQLTestUtils with TestHiveSingleton {
@@ -28,48 +29,50 @@ abstract class PrunePartitionSuiteBase extends QueryTest with SQLTestUtils with
   protected def format: String
 
   test("SPARK-28169: Convert scan predicate condition to CNF") {
-    withTempView("temp") {
-      withTable("t") {
-        sql(
-          s"""
-             |CREATE TABLE t(i INT, p STRING)
-             |USING $format
-             |PARTITIONED BY (p)""".stripMargin)
-
-        spark.range(0, 1000, 1).selectExpr("id as col")
-          .createOrReplaceTempView("temp")
-
-        for (part <- Seq(1, 2, 3, 4)) {
+    withSQLConf(ADAPTIVE_EXECUTION_ENABLED.key -> "false") {
+      withTempView("temp") {
+        withTable("t") {
           sql(
             s"""
-               |INSERT OVERWRITE TABLE t PARTITION (p='$part')
-               |SELECT col FROM temp""".stripMargin)
-        }
+               |CREATE TABLE t(i INT, p STRING)
+               |USING $format
+               |PARTITIONED BY (p)""".stripMargin)
 
-        assertPrunedPartitions(
-          "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2,
-          "((`p` = '1') || (`p` = '2'))")
-        assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4,
-          "")
-        assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '3' AND i = 3 )", 2,
-          "((`p` = '1') || (`p` = '3'))")
-        assertPrunedPartitions(
-          "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '2' OR p = '3')", 3,
-          "((`p` = '1') || ((`p` = '2') || (`p` = '3')))")
-        assertPrunedPartitions(
-          "SELECT * FROM t", 4,
-          "")
-        assertPrunedPartitions(
-          "SELECT * FROM t WHERE p = '1' AND i = 2", 1,
-          "(`p` = '1')")
-        assertPrunedPartitions(
-          """
-            |SELECT i, COUNT(1) FROM (
-            |SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)
-            |) tmp GROUP BY i
-          """.stripMargin, 2, "((`p` = '1') || (`p` = '2'))")
+          spark.range(0, 1000, 1).selectExpr("id as col")
+            .createOrReplaceTempView("temp")
+
+          for (part <- Seq(1, 2, 3, 4)) {
+            sql(
+              s"""
+                 |INSERT OVERWRITE TABLE t PARTITION (p='$part')
+                 |SELECT col FROM temp""".stripMargin)
+          }
+
+          assertPrunedPartitions(
+            "SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)", 2,
+            "((`p` = '1') || (`p` = '2'))")
+          assertPrunedPartitions(
+            "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (i = 1 OR p = '2')", 4,
+            "")
+          assertPrunedPartitions(
+            "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '3' AND i = 3 )", 2,
+            "((`p` = '1') || (`p` = '3'))")
+          assertPrunedPartitions(
+            "SELECT * FROM t WHERE (p = '1' AND i = 2) OR (p = '2' OR p = '3')", 3,
+            "((`p` = '1') || ((`p` = '2') || (`p` = '3')))")
+          assertPrunedPartitions(
+            "SELECT * FROM t", 4,
+            "")
+          assertPrunedPartitions(
+            "SELECT * FROM t WHERE p = '1' AND i = 2", 1,
+            "(`p` = '1')")
+          assertPrunedPartitions(
+            """
+              |SELECT i, COUNT(1) FROM (
+              |SELECT * FROM t WHERE p = '1' OR (p = '2' AND i = 1)
+              |) tmp GROUP BY i
+            """.stripMargin, 2, "((`p` = '1') || (`p` = '2'))")
+        }
       }
     }
   }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadWithHiveSupportSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadWithHiveSupportSuite.scala
index 35dab79ff6..07901351fc 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadWithHiveSupportSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/sources/BucketedReadWithHiveSupportSuite.scala
@@ -17,10 +17,12 @@
 
 package org.apache.spark.sql.sources
 
+import org.apache.spark.sql.execution.adaptive.DisableAdaptiveExecutionSuite
 import org.apache.spark.sql.hive.test.TestHiveSingleton
 import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
 
-class BucketedReadWithHiveSupportSuite extends BucketedReadSuite with TestHiveSingleton {
+class BucketedReadWithHiveSupportSuite
+  extends BucketedReadSuite with DisableAdaptiveExecutionSuite with TestHiveSingleton {
 
   protected override def beforeAll(): Unit = {
     super.beforeAll()
     assert(spark.sparkContext.conf.get(CATALOG_IMPLEMENTATION) == "hive")
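
Note: both hunks apply the same pattern, which is useful beyond these two suites. Below is a minimal sketch of that pattern, assuming the Spark SQL test classpath (`QueryTest`, `SharedSparkSession`); the suite name `ExplicitConfSuite` and its test body are hypothetical illustrations, not code from this patch:

    import org.apache.spark.sql.QueryTest
    import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanExec
    import org.apache.spark.sql.internal.SQLConf.ADAPTIVE_EXECUTION_ENABLED
    import org.apache.spark.sql.test.SharedSparkSession

    // Hypothetical suite illustrating the pattern: pin every conf the
    // assertion depends on instead of inheriting the build's defaults.
    class ExplicitConfSuite extends QueryTest with SharedSparkSession {
      test("plan shape does not depend on the default AQE setting") {
        // withSQLConf sets spark.sql.adaptive.enabled for the body and
        // restores the previous value afterwards, so the assertion holds
        // regardless of what the surrounding build defaults AQE to.
        withSQLConf(ADAPTIVE_EXECUTION_ENABLED.key -> "false") {
          val plan = spark.range(10).where("id = 1").queryExecution.executedPlan
          assert(plan.find(_.isInstanceOf[AdaptiveSparkPlanExec]).isEmpty)
        }
      }
    }

When an entire suite must run without AQE, mixing in `DisableAdaptiveExecutionSuite` (as done for `BucketedReadWithHiveSupportSuite` above) achieves the same effect for every test in the suite.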