[SPARK-34952][SQL][FOLLOWUP] Normalize pushed down aggregate col name and group by col name
### What changes were proposed in this pull request?
Normalize pushed down aggregate col names and group by col names ...
### Why are the changes needed?
To handle case-sensitive column names.
### Does this PR introduce _any_ user-facing change?
No
### How was this patch tested?
Modify existing test
Closes #33739 from huaxingao/normalize.
Authored-by: Huaxin Gao <huaxin_gao@apple.com>
Signed-off-by: Dongjoon Hyun <dongjoon@apache.org>
(cherry picked from commit 3f8ec0dae4)
Signed-off-by: Dongjoon Hyun <dongjoon@apache.org>
This commit is contained in:
parent
c898a940e2
commit
ede1d1e9a7
|
@@ -93,8 +93,12 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan] with PredicateHelper {
|
||||||
agg
|
agg
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
val pushedAggregates = PushDownUtils
|
val normalizedAggregates = DataSourceStrategy.normalizeExprs(
|
||||||
.pushAggregates(sHolder.builder, aggregates, groupingExpressions)
|
aggregates, sHolder.relation.output).asInstanceOf[Seq[AggregateExpression]]
|
||||||
|
val normalizedGroupingExpressions = DataSourceStrategy.normalizeExprs(
|
||||||
|
groupingExpressions, sHolder.relation.output)
|
||||||
|
val pushedAggregates = PushDownUtils.pushAggregates(
|
||||||
|
sHolder.builder, normalizedAggregates, normalizedGroupingExpressions)
|
||||||
if (pushedAggregates.isEmpty) {
|
if (pushedAggregates.isEmpty) {
|
||||||
aggNode // return original plan node
|
aggNode // return original plan node
|
||||||
} else {
|
} else {
|
||||||
|
@@ -115,7 +119,7 @@ object V2ScanRelationPushDown extends Rule[LogicalPlan] with PredicateHelper {
|
||||||
// scalastyle:on
|
// scalastyle:on
|
||||||
val newOutput = scan.readSchema().toAttributes
|
val newOutput = scan.readSchema().toAttributes
|
||||||
assert(newOutput.length == groupingExpressions.length + aggregates.length)
|
assert(newOutput.length == groupingExpressions.length + aggregates.length)
|
||||||
val groupAttrs = groupingExpressions.zip(newOutput).map {
|
val groupAttrs = normalizedGroupingExpressions.zip(newOutput).map {
|
||||||
case (a: Attribute, b: Attribute) => b.withExprId(a.exprId)
|
case (a: Attribute, b: Attribute) => b.withExprId(a.exprId)
|
||||||
case (_, b) => b
|
case (_, b) => b
|
||||||
}
|
}
|
||||||
|
|
|
@@ -239,8 +239,8 @@ class JDBCV2Suite extends QueryTest with SharedSparkSession with ExplainSuiteHel
|
||||||
}
|
}
|
||||||
|
|
||||||
test("scan with aggregate push-down: MAX MIN with filter and group by") {
|
test("scan with aggregate push-down: MAX MIN with filter and group by") {
|
||||||
val df = sql("select MAX(SALARY), MIN(BONUS) FROM h2.test.employee where dept > 0" +
|
val df = sql("select MAX(SaLaRY), MIN(BONUS) FROM h2.test.employee where dept > 0" +
|
||||||
" group by DEPT")
|
" group by DePt")
|
||||||
val filters = df.queryExecution.optimizedPlan.collect {
|
val filters = df.queryExecution.optimizedPlan.collect {
|
||||||
case f: Filter => f
|
case f: Filter => f
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue