[Build] Enable MiMa checks for SQL
Now that 1.3 has been released, we should enable MiMa checks for the `sql` subproject.

Author: Josh Rosen <joshrosen@databricks.com>

Closes #5727 from JoshRosen/enable-more-mima-checks and squashes the following commits:

3ad302b [Josh Rosen] Merge remote-tracking branch 'origin/master' into enable-more-mima-checks
0c48e4d [Josh Rosen] Merge remote-tracking branch 'origin/master' into enable-more-mima-checks
e276cee [Josh Rosen] Fix SQL MiMa checks via excludes and private[sql]
44d0d01 [Josh Rosen] Add back 'launcher' exclude
1aae027 [Josh Rosen] Enable MiMa checks for launcher and sql projects.
This commit is contained in:
parent 77cc25fb74
commit fa01bec484
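For context, MiMa (the Migration Manager, wired into Spark's build through MimaBuild) compares each subproject's compiled classes against the previously released artifact and reports binary incompatibilities; intentional breaks are silenced with `ProblemFilters` excludes keyed by a problem type and a fully qualified name. A minimal sketch of such an exclude list, using hypothetical class and method names but only the `ProblemFilters` API and problem types already used in this patch:

    import com.typesafe.tools.mima.core._

    // Sketch of a MiMa exclude list in the style of MimaExcludes.scala.
    // The names below are hypothetical; each entry tells MiMa to ignore one
    // kind of incompatibility for one fully qualified class or member.
    val exampleExcludes = Seq(
      // A class that was deliberately removed between releases:
      ProblemFilters.exclude[MissingClassProblem](
        "org.apache.spark.example.RemovedOperator"),
      // A method that no longer exists on a still-public class:
      ProblemFilters.exclude[MissingMethodProblem](
        "org.apache.spark.example.SomeClass.removedMethod")
    )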
@@ -88,6 +88,22 @@ object MimaExcludes {
               "org.apache.spark.mllib.linalg.Vector.toSparse"),
             ProblemFilters.exclude[MissingMethodProblem](
               "org.apache.spark.mllib.linalg.Vector.numActives")
+          ) ++ Seq(
+            // This `protected[sql]` method was removed in 1.3.1
+            ProblemFilters.exclude[MissingMethodProblem](
+              "org.apache.spark.sql.SQLContext.checkAnalysis"),
+            // This `private[sql]` class was removed in 1.4.0:
+            ProblemFilters.exclude[MissingClassProblem](
+              "org.apache.spark.sql.execution.AddExchange"),
+            ProblemFilters.exclude[MissingClassProblem](
+              "org.apache.spark.sql.execution.AddExchange$"),
+            // These test support classes were moved out of src/main and into src/test:
+            ProblemFilters.exclude[MissingClassProblem](
+              "org.apache.spark.sql.parquet.ParquetTestData"),
+            ProblemFilters.exclude[MissingClassProblem](
+              "org.apache.spark.sql.parquet.ParquetTestData$"),
+            ProblemFilters.exclude[MissingClassProblem](
+              "org.apache.spark.sql.parquet.TestGroupWriteSupport")
           )

         case v if v.startsWith("1.3") =>
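One detail worth calling out from the excludes above: the patch lists both `org.apache.spark.sql.execution.AddExchange` and `AddExchange$` because a Scala case class compiles to two JVM class files, one for the class itself and one for its companion object, so removing one Scala definition deletes both from the jar. A hypothetical illustration:

    package org.apache.spark.sql.execution

    // Hypothetical stand-in for a removed operator: compiling this single
    // definition emits ExampleOperator.class and ExampleOperator$.class (the
    // synthetic companion object), so deleting it needs a MissingClassProblem
    // exclude for each of the two names.
    case class ExampleOperator(numPartitions: Int)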
@@ -156,9 +156,8 @@ object SparkBuild extends PomBuild {
     /* Enable tests settings for all projects except examples, assembly and tools */
     (allProjects ++ optionallyEnabledProjects).foreach(enable(TestSettings.settings))

-    // TODO: Add Sql to mima checks
-    // TODO: remove launcher from this list after 1.3.
-    allProjects.filterNot(x => Seq(spark, sql, hive, hiveThriftServer, catalyst, repl,
+    // TODO: remove launcher from this list after 1.4.0
+    allProjects.filterNot(x => Seq(spark, hive, hiveThriftServer, catalyst, repl,
       networkCommon, networkShuffle, networkYarn, launcher, unsafe).contains(x)).foreach {
       x => enable(MimaBuild.mimaSettings(sparkHome, x))(x)
     }
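The SparkBuild change above is what actually switches the check on: MiMa settings are applied to every subproject that is not in the exemption list, so deleting `sql` from that list (while `catalyst` and `hive` stay exempt) is all it takes. A small self-contained sketch of the same filterNot pattern, with hypothetical project names in place of sbt Project values:

    // Hypothetical project names; in SparkBuild these are sbt Project values
    // and the settings applied are MimaBuild.mimaSettings(sparkHome, x).
    object EnableMimaSketch extends App {
      val allProjects = Seq("core", "sql", "catalyst", "hive", "launcher")
      val mimaExempt  = Seq("catalyst", "hive", "launcher") // sql no longer exempt

      allProjects
        .filterNot(mimaExempt.contains)
        .foreach(p => println(s"MiMa checks enabled for $p")) // core, sql
    }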
@@ -84,7 +84,7 @@ object RDDConversions {
 }

 /** Logical plan node for scanning data from an RDD. */
-case class LogicalRDD(output: Seq[Attribute], rdd: RDD[Row])(sqlContext: SQLContext)
+private[sql] case class LogicalRDD(output: Seq[Attribute], rdd: RDD[Row])(sqlContext: SQLContext)
   extends LogicalPlan with MultiInstanceRelation {

   override def children: Seq[LogicalPlan] = Nil
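This hunk and the ones that follow apply the second half of the fix: marking internal plan nodes `private[sql]` scopes them to the org.apache.spark.sql package, so they drop out of the public API surface that MiMa compares and need no permanent excludes. A minimal illustration with hypothetical class names:

    package org.apache.spark.sql.execution

    // Hypothetical illustration of the visibility change. The first node is
    // public API, so MiMa tracks its signature across releases; the second is
    // visible anywhere under org.apache.spark.sql (other operators, the sql
    // test suites) but not to user code outside that package.
    case class PublicExampleNode(output: Seq[String])
    private[sql] case class InternalExampleNode(output: Seq[String])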
@@ -105,11 +105,12 @@ case class LogicalRDD(output: Seq[Attribute], rdd: RDD[Row])(sqlContext: SQLContext)
 }

 /** Physical plan node for scanning data from an RDD. */
-case class PhysicalRDD(output: Seq[Attribute], rdd: RDD[Row]) extends LeafNode {
+private[sql] case class PhysicalRDD(output: Seq[Attribute], rdd: RDD[Row]) extends LeafNode {
   override def execute(): RDD[Row] = rdd
 }

 /** Logical plan node for scanning data from a local collection. */
+private[sql]
 case class LogicalLocalTable(output: Seq[Attribute], rows: Seq[Row])(sqlContext: SQLContext)
   extends LogicalPlan with MultiInstanceRelation {

@@ -26,7 +26,7 @@ import org.apache.spark.sql.catalyst.expressions.Attribute
 /**
  * Physical plan node for scanning data from a local collection.
  */
-case class LocalTableScan(output: Seq[Attribute], rows: Seq[Row]) extends LeafNode {
+private[sql] case class LocalTableScan(output: Seq[Attribute], rows: Seq[Row]) extends LeafNode {

   private lazy val rdd = sqlContext.sparkContext.parallelize(rows)

@@ -42,7 +42,7 @@ trait RunnableCommand extends logical.Command {
  * A physical operator that executes the run method of a `RunnableCommand` and
  * saves the result to prevent multiple executions.
  */
-case class ExecutedCommand(cmd: RunnableCommand) extends SparkPlan {
+private[sql] case class ExecutedCommand(cmd: RunnableCommand) extends SparkPlan {
   /**
    * A concrete command should override this lazy field to wrap up any side effects caused by the
    * command or any other computation that should be evaluated exactly once. The value of this field