diff --git a/appveyor.yml b/appveyor.yml
index 8fb090cb0c..b0e946c543 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -55,7 +55,7 @@ build_script:
 environment:
   NOT_CRAN: true
-  # See SPARK-27848. Currently installing some dependent packagess causes
+  # See SPARK-27848. Currently installing some dependent packages causes
   # "(converted from warning) unable to identify current timezone 'C':" for an unknown reason.
   # This environment variable works around to test SparkR against a higher version.
   R_REMOTES_NO_ERRORS_FROM_WARNINGS: true
diff --git a/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java b/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
index 5999b6255b..a818fe46b9 100644
--- a/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
+++ b/common/network-common/src/test/java/org/apache/spark/network/ChunkFetchIntegrationSuite.java
@@ -151,9 +151,9 @@ public class ChunkFetchIntegrationSuite {
         clientFactory.createClient(TestUtils.getLocalHost(), server.getPort())) {
       final Semaphore sem = new Semaphore(0);
-      res.successChunks = Collections.synchronizedSet(new HashSet());
-      res.failedChunks = Collections.synchronizedSet(new HashSet());
-      res.buffers = Collections.synchronizedList(new LinkedList());
+      res.successChunks = Collections.synchronizedSet(new HashSet<>());
+      res.failedChunks = Collections.synchronizedSet(new HashSet<>());
+      res.buffers = Collections.synchronizedList(new LinkedList<>());
       ChunkReceivedCallback callback = new ChunkReceivedCallback() {
         @Override
diff --git a/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java b/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java
index 117f1e4d00..498dc51cdc 100644
--- a/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java
+++ b/common/network-common/src/test/java/org/apache/spark/network/RpcIntegrationSuite.java
@@ -175,8 +175,8 @@ public class RpcIntegrationSuite {
     final Semaphore sem = new Semaphore(0);
     final RpcResult res = new RpcResult();
-    res.successMessages = Collections.synchronizedSet(new HashSet());
-    res.errorMessages = Collections.synchronizedSet(new HashSet());
+    res.successMessages = Collections.synchronizedSet(new HashSet<>());
+    res.errorMessages = Collections.synchronizedSet(new HashSet<>());
     RpcResponseCallback callback = new RpcResponseCallback() {
       @Override
@@ -208,8 +208,8 @@ public class RpcIntegrationSuite {
     TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
     final Semaphore sem = new Semaphore(0);
     RpcResult res = new RpcResult();
-    res.successMessages = Collections.synchronizedSet(new HashSet());
-    res.errorMessages = Collections.synchronizedSet(new HashSet());
+    res.successMessages = Collections.synchronizedSet(new HashSet<>());
+    res.errorMessages = Collections.synchronizedSet(new HashSet<>());
     for (String stream : streams) {
       int idx = stream.lastIndexOf('/');
diff --git a/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java b/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java
index b4caa87292..2aec4a33bb 100644
--- a/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java
+++ b/common/network-common/src/test/java/org/apache/spark/network/TransportClientFactorySuite.java
@@ -84,7 +84,7 @@ public class TransportClientFactorySuite {
     try (TransportContext context = new TransportContext(conf, rpcHandler)) {
       TransportClientFactory factory = context.createClientFactory();
       Set clients = Collections.synchronizedSet(
-          new HashSet());
+          new HashSet<>());
       AtomicInteger failed = new AtomicInteger();
       Thread[] attempts = new Thread[maxConnections * 10];
diff --git a/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java b/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java
index 61a58e9e45..9d398e3720 100644
--- a/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java
+++ b/common/network-shuffle/src/test/java/org/apache/spark/network/shuffle/ExternalShuffleIntegrationSuite.java
@@ -170,9 +170,9 @@ public class ExternalShuffleIntegrationSuite {
       TransportConf clientConf,
       int port) throws Exception {
     final FetchResult res = new FetchResult();
-    res.successBlocks = Collections.synchronizedSet(new HashSet());
-    res.failedBlocks = Collections.synchronizedSet(new HashSet());
-    res.buffers = Collections.synchronizedList(new LinkedList());
+    res.successBlocks = Collections.synchronizedSet(new HashSet<>());
+    res.failedBlocks = Collections.synchronizedSet(new HashSet<>());
+    res.buffers = Collections.synchronizedList(new LinkedList<>());
     final Semaphore requestsRemaining = new Semaphore(0);
diff --git a/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java b/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java
index 0d441fb000..cd253c0cbc 100644
--- a/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java
+++ b/common/unsafe/src/test/java/org/apache/spark/unsafe/types/UTF8StringSuite.java
@@ -467,7 +467,7 @@ public class UTF8StringSuite {
       )));
     assertEquals(
       fromString("translate"),
-      fromString("translate").translate(new HashMap()));
+      fromString("translate").translate(new HashMap<>()));
     assertEquals(
       fromString("asae"),
       fromString("translate").translate(ImmutableMap.of(
diff --git a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
index 88125a6b93..6b83a984f0 100644
--- a/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
+++ b/core/src/test/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriterSuite.java
@@ -533,7 +533,7 @@ public class UnsafeShuffleWriterSuite {
     long newPeakMemory;
     try {
      for (int i = 0; i < numRecordsPerPage * 10; i++) {
-        writer.insertRecordIntoSorter(new Tuple2(1, 1));
+        writer.insertRecordIntoSorter(new Tuple2<>(1, 1));
        newPeakMemory = writer.getPeakMemoryUsedBytes();
        if (i % numRecordsPerPage == 0) {
          // The first page is allocated in constructor, another page will be allocated after
@@ -550,7 +550,7 @@ public class UnsafeShuffleWriterSuite {
      newPeakMemory = writer.getPeakMemoryUsedBytes();
      assertEquals(previousPeakMemory, newPeakMemory);
      for (int i = 0; i < numRecordsPerPage; i++) {
-        writer.insertRecordIntoSorter(new Tuple2(1, 1));
+        writer.insertRecordIntoSorter(new Tuple2<>(1, 1));
      }
      newPeakMemory = writer.getPeakMemoryUsedBytes();
      assertEquals(previousPeakMemory, newPeakMemory);
diff --git a/core/src/test/scala/org/apache/spark/metrics/source/AccumulatorSourceSuite.scala b/core/src/test/scala/org/apache/spark/metrics/source/AccumulatorSourceSuite.scala
index 45e6e0b491..4262d26036 100644
--- a/core/src/test/scala/org/apache/spark/metrics/source/AccumulatorSourceSuite.scala
+++ b/core/src/test/scala/org/apache/spark/metrics/source/AccumulatorSourceSuite.scala
@@ -66,7 +66,7 @@ class AccumulatorSourceSuite extends SparkFunSuite {
     assert(gauges.get("my-accumulator-2").getValue() == 456)
   }
-  test("the double accumulators value propety is checked when the gauge's value is requested") {
+  test("the double accumulators value property is checked when the gauge's value is requested") {
     val acc1 = new DoubleAccumulator()
     acc1.add(123.123)
     val acc2 = new DoubleAccumulator()
diff --git a/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredSessionization.java b/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredSessionization.java
index 943e3d82f3..34ee235d8b 100644
--- a/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredSessionization.java
+++ b/examples/src/main/java/org/apache/spark/examples/sql/streaming/JavaStructuredSessionization.java
@@ -70,7 +70,7 @@ public final class JavaStructuredSessionization {
         new FlatMapFunction() {
           @Override
           public Iterator call(LineWithTimestamp lineWithTimestamp) {
-            ArrayList eventList = new ArrayList();
+            ArrayList eventList = new ArrayList<>();
             for (String word : lineWithTimestamp.getLine().split(" ")) {
               eventList.add(new Event(word, lineWithTimestamp.getTimestamp()));
             }
diff --git a/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaConsumerStrategySuite.java b/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaConsumerStrategySuite.java
index 938cc8ddfb..dc364aca9b 100644
--- a/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaConsumerStrategySuite.java
+++ b/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaConsumerStrategySuite.java
@@ -42,7 +42,7 @@ public class JavaConsumerStrategySuite implements Serializable {
     final Collection parts = Arrays.asList(tp1, tp2);
     final scala.collection.Iterable sParts =
       JavaConverters.collectionAsScalaIterableConverter(parts).asScala();
-    final Map kafkaParams = new HashMap();
+    final Map kafkaParams = new HashMap<>();
     kafkaParams.put("bootstrap.servers", "not used");
     final scala.collection.Map sKafkaParams =
       JavaConverters.mapAsScalaMapConverter(kafkaParams).asScala();
diff --git a/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaDirectKafkaStreamSuite.java b/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaDirectKafkaStreamSuite.java
index dc9c13ba86..2b8b1852fe 100644
--- a/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaDirectKafkaStreamSuite.java
+++ b/external/kafka-0-10/src/test/java/org/apache/spark/streaming/kafka010/JavaDirectKafkaStreamSuite.java
@@ -152,7 +152,7 @@ public class JavaDirectKafkaStreamSuite implements Serializable {
     JavaDStream unifiedStream = stream1.union(stream2);
-    final Set result = Collections.synchronizedSet(new HashSet());
+    final Set result = Collections.synchronizedSet(new HashSet<>());
     unifiedStream.foreachRDD(new VoidFunction>() {
       @Override
       public void call(JavaRDD rdd) {
diff --git a/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java b/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java
index 9795041233..22d9324ba4 100644
--- a/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java
+++ b/launcher/src/test/java/org/apache/spark/launcher/CommandBuilderUtilsSuite.java
@@ -37,7 +37,7 @@ public class CommandBuilderUtilsSuite {
     testOpt(" a b c \\\\ ", Arrays.asList("a", "b", "c", "\\"));
     // Following tests ported from UtilsSuite.scala.
-    testOpt("", new ArrayList());
+    testOpt("", new ArrayList<>());
     testOpt("a", Arrays.asList("a"));
     testOpt("aaa", Arrays.asList("aaa"));
     testOpt("a b c", Arrays.asList("a", "b", "c"));
diff --git a/mllib/src/test/java/org/apache/spark/ml/stat/JavaSummarizerSuite.java b/mllib/src/test/java/org/apache/spark/ml/stat/JavaSummarizerSuite.java
index 38ab39aa0f..163d2137a3 100644
--- a/mllib/src/test/java/org/apache/spark/ml/stat/JavaSummarizerSuite.java
+++ b/mllib/src/test/java/org/apache/spark/ml/stat/JavaSummarizerSuite.java
@@ -40,7 +40,7 @@ public class JavaSummarizerSuite extends SharedSparkSession {
   @Override
   public void setUp() throws IOException {
     super.setUp();
-    List points = new ArrayList();
+    List points = new ArrayList<>();
     points.add(new LabeledPoint(0.0, Vectors.dense(1.0, 2.0)));
     points.add(new LabeledPoint(0.0, Vectors.dense(3.0, 4.0)));
diff --git a/mllib/src/test/java/org/apache/spark/mllib/regression/JavaRidgeRegressionSuite.java b/mllib/src/test/java/org/apache/spark/mllib/regression/JavaRidgeRegressionSuite.java
index fb6c775a49..5a9389c424 100644
--- a/mllib/src/test/java/org/apache/spark/mllib/regression/JavaRidgeRegressionSuite.java
+++ b/mllib/src/test/java/org/apache/spark/mllib/regression/JavaRidgeRegressionSuite.java
@@ -57,7 +57,7 @@ public class JavaRidgeRegressionSuite extends SharedSparkSession {
     List data = generateRidgeData(2 * numExamples, numFeatures, 10.0);
     JavaRDD testRDD = jsc.parallelize(
-      new ArrayList(data.subList(0, numExamples)));
+      new ArrayList<>(data.subList(0, numExamples)));
     List validationData = data.subList(numExamples, 2 * numExamples);
     RidgeRegressionWithSGD ridgeSGDImpl = new RidgeRegressionWithSGD();
@@ -82,7 +82,7 @@ public class JavaRidgeRegressionSuite extends SharedSparkSession {
     List data = generateRidgeData(2 * numExamples, numFeatures, 10.0);
     JavaRDD testRDD = jsc.parallelize(
-      new ArrayList(data.subList(0, numExamples)));
+      new ArrayList<>(data.subList(0, numExamples)));
     List validationData = data.subList(numExamples, 2 * numExamples);
     RidgeRegressionModel model = RidgeRegressionWithSGD.train(testRDD.rdd(), 200, 1.0, 0.0);
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/python/WindowInPandasExec.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/python/WindowInPandasExec.scala
index 01ce07b133..cad89dedb8 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/python/WindowInPandasExec.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/python/WindowInPandasExec.scala
@@ -251,7 +251,7 @@ case class WindowInPandasExec(
     }
     // Setting the window bounds argOffset for each UDF. For UDFs with bounded window, argOffset
-    // for the UDF is (lowerBoundOffet, upperBoundOffset, inputOffset1, inputOffset2, ...)
+    // for the UDF is (lowerBoundOffset, upperBoundOffset, inputOffset1, inputOffset2, ...)
     // For UDFs with unbounded window, argOffset is (inputOffset1, inputOffset2, ...)
     pyFuncs.indices.foreach { exprIndex =>
       val frameIndex = expressionIndexToFrameIndex(exprIndex)
diff --git a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameReaderWriterSuite.java b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameReaderWriterSuite.java
index 7babf7573c..d54be46c10 100644
--- a/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameReaderWriterSuite.java
+++ b/sql/core/src/test/java/test/org/apache/spark/sql/JavaDataFrameReaderWriterSuite.java
@@ -62,7 +62,7 @@ public class JavaDataFrameReaderWriterSuite {
   @Test
   public void testOptionsAPI() {
-    HashMap map = new HashMap();
+    HashMap map = new HashMap<>();
     map.put("e", "1");
     spark
       .read()
diff --git a/streaming/src/test/java/org/apache/spark/streaming/JavaMapWithStateSuite.java b/streaming/src/test/java/org/apache/spark/streaming/JavaMapWithStateSuite.java
index b1367b8f2a..2e00713b94 100644
--- a/streaming/src/test/java/org/apache/spark/streaming/JavaMapWithStateSuite.java
+++ b/streaming/src/test/java/org/apache/spark/streaming/JavaMapWithStateSuite.java
@@ -149,10 +149,10 @@ public class JavaMapWithStateSuite extends LocalJavaStreamingContext implements
         inputStream.map(x -> new Tuple2<>(x, 1))).mapWithState(mapWithStateSpec);
     List> collectedOutputs =
-        Collections.synchronizedList(new ArrayList>());
+        Collections.synchronizedList(new ArrayList<>());
     mapWithStateDStream.foreachRDD(rdd -> collectedOutputs.add(Sets.newHashSet(rdd.collect())));
     List>> collectedStateSnapshots =
-        Collections.synchronizedList(new ArrayList>>());
+        Collections.synchronizedList(new ArrayList<>());
     mapWithStateDStream.stateSnapshots().foreachRDD(rdd ->
         collectedStateSnapshots.add(Sets.newHashSet(rdd.collect())));
     BatchCounter batchCounter = new BatchCounter(ssc.ssc());
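The recurring change in the Java hunks above is replacing explicit generic type arguments at construction sites with the diamond operator, letting the compiler infer them from the target type. A minimal self-contained sketch of that before/after pattern, with illustrative class and variable names that are not taken from the patch:

```java
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class DiamondOperatorExample {
  public static void main(String[] args) {
    // Before: the element type is spelled out twice.
    Set<Integer> verbose = Collections.synchronizedSet(new HashSet<Integer>());

    // After: the diamond operator infers <Integer> from the declared type,
    // mirroring the HashSet / LinkedList / ArrayList changes in the hunks above.
    Set<Integer> concise = Collections.synchronizedSet(new HashSet<>());

    verbose.add(1);
    concise.add(1);
    System.out.println(verbose + " " + concise);
  }
}
```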