[SPARK-30170][SQL][MLLIB][TESTS] Eliminate compilation warnings: part 1

### What changes were proposed in this pull request?
- Replace `Seq[String]` by `Seq[_]` in `StopWordsRemoverSuite` because the `String` type argument is unchecked due to type erasure.
- Throw an exception for default case in `MLTest.checkNominalOnDF` because we don't expect other attribute types currently.
- Explicitly cast float to double in `BigDecimal(y)`. This is what the `apply()` method does for `float`s.
- Replace deprecated `verifyZeroInteractions` by `verifyNoInteractions`.
- Equivalent replacement of `\0` by `\u0000` in `CSVExprUtilsSuite`
- Import `scala.language.implicitConversions` in `CollectionExpressionsSuite`, `HashExpressionsSuite` and in `ExpressionParserSuite`.

### Why are the changes needed?
The changes fix the compiler warnings shown in the JIRA ticket https://issues.apache.org/jira/browse/SPARK-30170 . Eliminating these warnings makes the remaining warnings more visible, so real problems are more likely to get attention.

### Does this PR introduce any user-facing change?
No

### How was this patch tested?
By existing test suites `StopWordsRemoverSuite`, `AnalysisExternalCatalogSuite`, `CSVExprUtilsSuite`, `CollectionExpressionsSuite`, `HashExpressionsSuite`, `ExpressionParserSuite` and sub-tests of `MLTest`.

Closes #26799 from MaxGekk/eliminate-warning-2.

Authored-by: Maxim Gekk <max.gekk@gmail.com>
Signed-off-by: Sean Owen <srowen@gmail.com>
This commit is contained in:
Maxim Gekk 2019-12-12 08:38:15 -06:00 committed by Sean Owen
parent ce61ee8941
commit 25de90e762
8 changed files with 17 additions and 7 deletions

View file

@ -242,7 +242,7 @@ class StopWordsRemoverSuite extends MLTest with DefaultReadWriteTest {
remover.transform(df)
.select("filtered1", "expected1", "filtered2", "expected2")
.collect().foreach {
case Row(r1: Seq[String], e1: Seq[String], r2: Seq[String], e2: Seq[String]) =>
case Row(r1: Seq[_], e1: Seq[_], r2: Seq[_], e2: Seq[_]) =>
assert(r1 === e1,
s"The result value is not correct after bucketing. Expected $e1 but found $r1")
assert(r2 === e2,
@ -268,7 +268,7 @@ class StopWordsRemoverSuite extends MLTest with DefaultReadWriteTest {
remover.transform(df)
.select("filtered1", "expected1", "filtered2", "expected2")
.collect().foreach {
case Row(r1: Seq[String], e1: Seq[String], r2: Seq[String], e2: Seq[String]) =>
case Row(r1: Seq[_], e1: Seq[_], r2: Seq[_], e2: Seq[_]) =>
assert(r1 === e1,
s"The result value is not correct after bucketing. Expected $e1 but found $r1")
assert(r2 === e2,

View file

@ -88,6 +88,8 @@ trait MLTest extends StreamTest with TempDirectory { self: Suite =>
val n = Attribute.fromStructField(dataframe.schema(colName)) match {
case binAttr: BinaryAttribute => Some(2)
case nomAttr: NominalAttribute => nomAttr.getNumValues
case unknown =>
throw new IllegalArgumentException(s"Attribute type: ${unknown.getClass.getName}")
}
assert(n.isDefined && n.get === numValues,
s"the number of values obtained from schema should be $numValues, but got $n")

View file

@ -78,8 +78,12 @@ case object FloatType extends FloatType {
}
trait FloatAsIfIntegral extends FloatIsConflicted with Integral[Float] {
def quot(x: Float, y: Float): Float = (BigDecimal(x) quot BigDecimal(y)).floatValue
def rem(x: Float, y: Float): Float = (BigDecimal(x) remainder BigDecimal(y)).floatValue
def quot(x: Float, y: Float): Float = {
(BigDecimal(x.toDouble) quot BigDecimal(y.toDouble)).floatValue
}
def rem(x: Float, y: Float): Float = {
(BigDecimal(x.toDouble) remainder BigDecimal(y.toDouble)).floatValue
}
}
object FloatAsIfIntegral extends FloatAsIfIntegral {

View file

@ -59,7 +59,7 @@ class AnalysisExternalCatalogSuite extends AnalysisTest with Matchers {
Alias(UnresolvedFunction("sum", Seq(UnresolvedAttribute("a")), isDistinct = false), "s")()
val plan = Project(Seq(func), testRelation)
analyzer.execute(plan)
verifyZeroInteractions(catalog)
verifyNoInteractions(catalog)
}
}
@ -73,7 +73,7 @@ class AnalysisExternalCatalogSuite extends AnalysisTest with Matchers {
ignoreIfExists = false)
reset(externCatalog)
catalog.functionExists(FunctionIdentifier("sum"))
verifyZeroInteractions(externCatalog)
verifyNoInteractions(externCatalog)
}
}

View file

@ -78,7 +78,7 @@ class CSVExprUtilsSuite extends SparkFunSuite {
// null character, expressed in Unicode literal syntax
("""\u0000""", Some("\u0000"), None),
// and specified directly
("\0", Some("\u0000"), None)
("\u0000", Some("\u0000"), None)
)
test("should correctly produce separator strings, or exceptions, from input") {

View file

@ -20,6 +20,7 @@ package org.apache.spark.sql.catalyst.expressions
import java.sql.{Date, Timestamp}
import java.util.TimeZone
import scala.language.implicitConversions
import scala.util.Random
import org.apache.spark.SparkFunSuite

View file

@ -21,6 +21,7 @@ import java.nio.charset.StandardCharsets
import java.time.{ZoneId, ZoneOffset}
import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions
import org.apache.commons.codec.digest.DigestUtils
import org.scalatest.exceptions.TestFailedException

View file

@ -20,6 +20,8 @@ import java.sql.{Date, Timestamp}
import java.time.LocalDateTime
import java.util.concurrent.TimeUnit
import scala.language.implicitConversions
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.analysis.{UnresolvedAttribute, _}
import org.apache.spark.sql.catalyst.expressions._