[MINOR] Resolve a number of miscellaneous build warnings

## What changes were proposed in this pull request?

This change resolves a number of build warnings that have accumulated before the 2.x release. It does not address the large number of deprecation warnings, especially those related to the Accumulator API; those will be handled separately.

## How was this patch tested?

Jenkins

Author: Sean Owen <sowen@cloudera.com>

Closes #13377 from srowen/BuildWarnings.
Sean Owen 2016-05-29 16:48:14 -05:00
parent 472f16181d
commit ce1572d16f
8 changed files with 15 additions and 6 deletions


@@ -155,8 +155,8 @@ public final class Platform {
   @SuppressWarnings("unchecked")
   public static ByteBuffer allocateDirectBuffer(int size) {
     try {
-      Class cls = Class.forName("java.nio.DirectByteBuffer");
-      Constructor constructor = cls.getDeclaredConstructor(Long.TYPE, Integer.TYPE);
+      Class<?> cls = Class.forName("java.nio.DirectByteBuffer");
+      Constructor<?> constructor = cls.getDeclaredConstructor(Long.TYPE, Integer.TYPE);
       constructor.setAccessible(true);
       Field cleanerField = cls.getDeclaredField("cleaner");
       cleanerField.setAccessible(true);
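The fix here parameterizes raw `Class` and `Constructor` references, which javac reports as raw-type warnings; `Class<?>` and `Constructor<?>` keep type checking intact without changing behavior. For illustration only, the same reflection pattern sketched in Scala, where the wildcard `Class[_]` plays the role of Java's `Class<?>` (this sketch is not code from the patch):

```scala
// Illustrative sketch, not from the patch: Scala's wildcard Class[_] is the
// counterpart of Java's Class<?>, avoiding an unparameterized (raw) type
// while still allowing reflective construction.
val cls: Class[_] = Class.forName("java.nio.DirectByteBuffer")
val ctor = cls.getDeclaredConstructor(java.lang.Long.TYPE, java.lang.Integer.TYPE)
ctor.setAccessible(true)
```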


@@ -72,7 +72,9 @@ public class JavaPrefixSpanSuite extends SharedSparkSession {
     try {
       model.save(spark.sparkContext(), outputPath);
-      PrefixSpanModel newModel = PrefixSpanModel.load(spark.sparkContext(), outputPath);
+      @SuppressWarnings("unchecked")
+      PrefixSpanModel<Integer> newModel =
+        (PrefixSpanModel<Integer>) PrefixSpanModel.load(spark.sparkContext(), outputPath);
       JavaRDD<FreqSequence<Integer>> freqSeqs = newModel.freqSequences().toJavaRDD();
       List<FreqSequence<Integer>> localFreqSeqs = freqSeqs.collect();
       Assert.assertEquals(5, localFreqSeqs.size());


@@ -16,6 +16,8 @@
  */
 package org.apache.spark.mllib.fpm
 
+import scala.language.existentials
+
 import org.apache.spark.SparkFunSuite
 import org.apache.spark.mllib.util.MLlibTestSparkContext
 import org.apache.spark.util.Utils
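The added `scala.language.existentials` import acknowledges a SIP-18 gated feature: existential types, whether written with `forSome` or inferred by the compiler, draw a `-feature` warning unless the flag is imported (or enabled with `-language:existentials`). A minimal standalone sketch of the trigger, not taken from the suite:

```scala
import scala.language.existentials

object ExistentialDemo {
  // Without the import above, an explicit existential type written with
  // forSome draws a -feature warning under Scala 2.x.
  type SomeList = List[T] forSome { type T }
  val xs: SomeList = List(1, 2, 3)
}
```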


@@ -288,7 +288,7 @@ class ScalaReflectionSuite extends SparkFunSuite {
     assert(serializer.children.head.asInstanceOf[Literal].value === UTF8String.fromString("value"))
     assert(serializer.children.last.isInstanceOf[NewInstance])
     assert(serializer.children.last.asInstanceOf[NewInstance]
-      .cls.isInstanceOf[Class[org.apache.spark.sql.catalyst.util.GenericArrayData]])
+      .cls.isAssignableFrom(classOf[org.apache.spark.sql.catalyst.util.GenericArrayData]))
   }
 
   private val dataTypeForComplexData = dataTypeFor[ComplexData]
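This change also repairs the assertion itself: generic type arguments are erased at runtime, so `isInstanceOf[Class[GenericArrayData]]` can only test that the value is *some* `Class` (hence the unchecked warning) and succeeds vacuously, while `isAssignableFrom` performs a real runtime check. A small illustration with hypothetical values:

```scala
val c: Class[_] = classOf[String]

// Erasure means only "is it a Class at all?" can be tested; this is true
// for any Class value, and the compiler warns that Class[Int] is unchecked.
c.isInstanceOf[Class[Int]]          // true, with an unchecked warning

// isAssignableFrom is a genuine runtime relationship check:
c.isAssignableFrom(classOf[String]) // true
c.isAssignableFrom(classOf[Int])    // false
```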


@@ -365,7 +365,8 @@ class ExpressionEncoderSuite extends PlanTest with AnalysisTest {
         Arrays.deepEquals(b1.asInstanceOf[Array[AnyRef]], b2.asInstanceOf[Array[AnyRef]])
       case (b1: Array[_], b2: Array[_]) =>
         Arrays.equals(b1.asInstanceOf[Array[AnyRef]], b2.asInstanceOf[Array[AnyRef]])
-      case (left: Comparable[Any], right: Comparable[Any]) => left.compareTo(right) == 0
+      case (left: Comparable[_], right: Comparable[_]) =>
+        left.asInstanceOf[Comparable[Any]].compareTo(right) == 0
       case _ => input == convertedBack
     }
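Matching on `Comparable[Any]` was an unchecked pattern for the same reason: the type argument cannot be verified at runtime. Matching on the wildcard `Comparable[_]` and casting afterwards confines the unchecked step to one explicit `asInstanceOf`. A self-contained sketch of the pattern, not the suite's code:

```scala
def compareEqual(a: Any, b: Any): Boolean = (a, b) match {
  // Comparable[_] is checkable at runtime; the cast below is the single,
  // deliberate unchecked step, mirroring the fix above.
  case (x: Comparable[_], y: Comparable[_]) =>
    x.asInstanceOf[Comparable[Any]].compareTo(y) == 0
  case _ => a == b
}

compareEqual("a", "a") // true
compareEqual(1, 2)     // false
```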


@@ -17,6 +17,8 @@
 package org.apache.spark.sql
 
+import scala.language.existentials
+
 import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.execution.joins._


@@ -19,6 +19,8 @@ package org.apache.spark.sql.execution.datasources
 import java.io.File
 
+import scala.language.reflectiveCalls
+
 import org.apache.hadoop.fs.Path
 
 import org.apache.spark.sql.catalyst.util._
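`scala.language.reflectiveCalls` gates calls made through structural types, which the compiler implements via runtime reflection and therefore flags with a `-feature` warning. A minimal sketch of the trigger, not taken from the suite:

```scala
import scala.language.reflectiveCalls

// Invoking a member through a structural type compiles to a reflective
// call; without the import above, this definition draws a -feature warning.
def closeQuietly(resource: { def close(): Unit }): Unit = resource.close()

closeQuietly(new java.io.StringReader("done"))
```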


@@ -20,6 +20,7 @@ import java.io.{DataOutputStream, File, FileOutputStream}
 
 import scala.annotation.tailrec
 import scala.concurrent.duration._
+import scala.language.postfixOps
 
 import org.apache.hadoop.fs.Path
 import org.apache.hadoop.yarn.api.records.ApplicationId
@@ -27,7 +28,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration
 import org.apache.hadoop.yarn.server.api.{ApplicationInitializationContext, ApplicationTerminationContext}
 import org.scalatest.{BeforeAndAfterEach, Matchers}
 import org.scalatest.concurrent.Eventually._
-import org.scalatest.concurrent.Timeouts
 
 import org.apache.spark.SparkFunSuite
 import org.apache.spark.network.shuffle.ShuffleTestAccessor
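Here `scala.language.postfixOps` gates postfix method calls such as `10 seconds`, which ScalaTest's `Eventually` timeouts commonly use, and the now-unused `Timeouts` import is dropped. A short sketch of the postfix trigger, illustrative only:

```scala
import scala.concurrent.duration._
import scala.language.postfixOps

// "10 seconds" calls the seconds method in postfix position; without the
// language import this draws a -feature warning.
val timeout: FiniteDuration = 10 seconds
```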