[SPARK-16371][SQL] Two follow-up tasks
## What changes were proposed in this pull request?

This is a small follow-up for SPARK-16371:
1. Hide `removeMetadata` from the public API (make it `private[sql]`).
2. Add the JIRA ticket number to the test case name.

## How was this patch tested?

Updated a test comment.

Author: Reynold Xin <rxin@databricks.com>

Closes #14074 from rxin/parquet-filter.
This commit is contained in:
parent
9c041990cf
commit
8e3e4ed6c0
|
@@ -413,10 +413,10 @@ object StructType extends AbstractDataType {
|
|||
StructType(fields.asScala)
|
||||
}
|
||||
|
||||
protected[sql] def fromAttributes(attributes: Seq[Attribute]): StructType =
|
||||
private[sql] def fromAttributes(attributes: Seq[Attribute]): StructType =
|
||||
StructType(attributes.map(a => StructField(a.name, a.dataType, a.nullable, a.metadata)))
|
||||
|
||||
def removeMetadata(key: String, dt: DataType): DataType =
|
||||
private[sql] def removeMetadata(key: String, dt: DataType): DataType =
|
||||
dt match {
|
||||
case StructType(fields) =>
|
||||
val newFields = fields.map { f =>
|
||||
|
|
|
@@ -544,7 +544,7 @@ class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContext
|
|||
}
|
||||
}
|
||||
|
||||
test("Do not push down filters incorrectly when inner name and outer name are the same") {
|
||||
test("SPARK-16371 Do not push down filters when inner name and outer name are the same") {
|
||||
withParquetDataFrame((1 to 4).map(i => Tuple1(Tuple1(i)))) { implicit df =>
|
||||
// Here the schema becomes as below:
|
||||
//
|
||||
|
|
Loading…
Reference in a new issue