From 3956e95f059ba9599c3cfde29225177d29b2494a Mon Sep 17 00:00:00 2001
From: HyukjinKwon <gurwls223@apache.org>
Date: Tue, 3 Mar 2020 10:24:50 +0900
Subject: [PATCH] [SPARK-25202][SQL][FOLLOW-UP] Keep the old parameter name
 'pattern' at split in Scala API

### What changes were proposed in this pull request?

To address the concern pointed out in https://github.com/apache/spark/pull/22227. This will make `split` source-compatible by removing minimal cosmetic changes.

### Why are the changes needed?

For source compatibility.

### Does this PR introduce any user-facing change?

No (it will prevent potential user-facing change from the original PR)

### How was this patch tested?

Unittest was changed (in order for us to detect that source compatibility easily).

Closes #27756 from HyukjinKwon/SPARK-25202.

Authored-by: HyukjinKwon <gurwls223@apache.org>
Signed-off-by: HyukjinKwon <gurwls223@apache.org>
---
 .../org/apache/spark/sql/functions.scala      | 20 +++++++++----------
 .../org/apache/spark/sql/DataFrameSuite.scala |  2 +-
 2 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
index c60df14f04..c6e8cf76fe 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/functions.scala
@@ -2460,25 +2460,25 @@ object functions {
   def soundex(e: Column): Column = withExpr { SoundEx(e.expr) }
 
   /**
-   * Splits str around matches of the given regex.
+   * Splits str around matches of the given pattern.
    *
    * @param str a string expression to split
-   * @param regex a string representing a regular expression. The regex string should be
-   *              a Java regular expression.
+   * @param pattern a string representing a regular expression. The regex string should be
+   *                a Java regular expression.
    *
    * @group string_funcs
    * @since 1.5.0
    */
-  def split(str: Column, regex: String): Column = withExpr {
-    StringSplit(str.expr, Literal(regex), Literal(-1))
+  def split(str: Column, pattern: String): Column = withExpr {
+    StringSplit(str.expr, Literal(pattern), Literal(-1))
   }
 
   /**
-   * Splits str around matches of the given regex.
+   * Splits str around matches of the given pattern.
    *
    * @param str a string expression to split
-   * @param regex a string representing a regular expression. The regex string should be
-   *              a Java regular expression.
+   * @param pattern a string representing a regular expression. The regex string should be
+   *                a Java regular expression.
    * @param limit an integer expression which controls the number of times the regex is applied.
    *        <ul>
    *          <li>limit greater than 0: The resulting array's length will not be more than limit,
@@ -2491,8 +2491,8 @@ object functions {
    * @group string_funcs
    * @since 3.0.0
    */
-  def split(str: Column, regex: String, limit: Int): Column = withExpr {
-    StringSplit(str.expr, Literal(regex), Literal(limit))
+  def split(str: Column, pattern: String, limit: Int): Column = withExpr {
+    StringSplit(str.expr, Literal(pattern), Literal(limit))
   }
 
   /**
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
index 42a9073536..e74d553284 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameSuite.scala
@@ -196,7 +196,7 @@ class DataFrameSuite extends QueryTest
   test("explode on output of array-valued function") {
     val df = Seq(("1,2"), ("4"), ("7,8,9")).toDF("csv")
     checkAnswer(
-      df.select(explode(split($"csv", ","))),
+      df.select(explode(split($"csv", pattern = ","))),
       Row("1") :: Row("2") :: Row("4") :: Row("7") :: Row("8") :: Row("9") :: Nil)
   }
 