[MINOR] Fix typos in comments and replace an explicit type with <>

## What changes were proposed in this pull request?
This PR fixes typos in comments and replaces explicit type arguments with the diamond operator `<>` for Java 8+.
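
As a minimal, self-contained sketch of the kind of change applied below (the class and variable names are only illustrative and do not come from the Spark sources), the diamond operator lets the compiler infer the type arguments from the target type, so the constructor call no longer repeats them:

```java
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

public class DiamondOperatorExample {
  public static void main(String[] args) {
    // Before: the element type is spelled out again on the constructor call.
    Set<String> explicit = Collections.synchronizedSet(new HashSet<String>());

    // After: the diamond operator <> lets the compiler infer <String> from the
    // declared type, so the call site stays correct if that type ever changes.
    Set<String> inferred = Collections.synchronizedSet(new HashSet<>());

    explicit.add("spark");
    inferred.add("spark");
    System.out.println(explicit.equals(inferred)); // prints: true
  }
}
```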

## How was this patch tested?
Manually tested.

Closes #25338 from younggyuchun/younggyu.

Authored-by: younggyu chun <younggyuchun@gmail.com>
Signed-off-by: Sean Owen <sean.owen@databricks.com>
parent ef80c32266
commit 8535df7261
17 changed files with 27 additions and 27 deletions

@@ -55,7 +55,7 @@ build_script:
 environment:
 NOT_CRAN: true
-# See SPARK-27848. Currently installing some dependent packagess causes
+# See SPARK-27848. Currently installing some dependent packages causes
 # "(converted from warning) unable to identify current timezone 'C':" for an unknown reason.
 # This environment variable works around to test SparkR against a higher version.
 R_REMOTES_NO_ERRORS_FROM_WARNINGS: true

@@ -151,9 +151,9 @@ public class ChunkFetchIntegrationSuite {
 clientFactory.createClient(TestUtils.getLocalHost(), server.getPort())) {
 final Semaphore sem = new Semaphore(0);
-res.successChunks = Collections.synchronizedSet(new HashSet<Integer>());
-res.failedChunks = Collections.synchronizedSet(new HashSet<Integer>());
-res.buffers = Collections.synchronizedList(new LinkedList<ManagedBuffer>());
+res.successChunks = Collections.synchronizedSet(new HashSet<>());
+res.failedChunks = Collections.synchronizedSet(new HashSet<>());
+res.buffers = Collections.synchronizedList(new LinkedList<>());
 ChunkReceivedCallback callback = new ChunkReceivedCallback() {
 @Override

@@ -175,8 +175,8 @@ public class RpcIntegrationSuite {
 final Semaphore sem = new Semaphore(0);
 final RpcResult res = new RpcResult();
-res.successMessages = Collections.synchronizedSet(new HashSet<String>());
-res.errorMessages = Collections.synchronizedSet(new HashSet<String>());
+res.successMessages = Collections.synchronizedSet(new HashSet<>());
+res.errorMessages = Collections.synchronizedSet(new HashSet<>());
 RpcResponseCallback callback = new RpcResponseCallback() {
 @Override
@@ -208,8 +208,8 @@ public class RpcIntegrationSuite {
 TransportClient client = clientFactory.createClient(TestUtils.getLocalHost(), server.getPort());
 final Semaphore sem = new Semaphore(0);
 RpcResult res = new RpcResult();
-res.successMessages = Collections.synchronizedSet(new HashSet<String>());
-res.errorMessages = Collections.synchronizedSet(new HashSet<String>());
+res.successMessages = Collections.synchronizedSet(new HashSet<>());
+res.errorMessages = Collections.synchronizedSet(new HashSet<>());
 for (String stream : streams) {
 int idx = stream.lastIndexOf('/');

@@ -84,7 +84,7 @@ public class TransportClientFactorySuite {
 try (TransportContext context = new TransportContext(conf, rpcHandler)) {
 TransportClientFactory factory = context.createClientFactory();
 Set<TransportClient> clients = Collections.synchronizedSet(
-new HashSet<TransportClient>());
+new HashSet<>());
 AtomicInteger failed = new AtomicInteger();
 Thread[] attempts = new Thread[maxConnections * 10];

@@ -170,9 +170,9 @@ public class ExternalShuffleIntegrationSuite {
 TransportConf clientConf,
 int port) throws Exception {
 final FetchResult res = new FetchResult();
-res.successBlocks = Collections.synchronizedSet(new HashSet<String>());
-res.failedBlocks = Collections.synchronizedSet(new HashSet<String>());
-res.buffers = Collections.synchronizedList(new LinkedList<ManagedBuffer>());
+res.successBlocks = Collections.synchronizedSet(new HashSet<>());
+res.failedBlocks = Collections.synchronizedSet(new HashSet<>());
+res.buffers = Collections.synchronizedList(new LinkedList<>());
 final Semaphore requestsRemaining = new Semaphore(0);

@@ -467,7 +467,7 @@ public class UTF8StringSuite {
 )));
 assertEquals(
 fromString("translate"),
-fromString("translate").translate(new HashMap<Character, Character>()));
+fromString("translate").translate(new HashMap<>()));
 assertEquals(
 fromString("asae"),
 fromString("translate").translate(ImmutableMap.of(

@@ -533,7 +533,7 @@ public class UnsafeShuffleWriterSuite {
 long newPeakMemory;
 try {
 for (int i = 0; i < numRecordsPerPage * 10; i++) {
-writer.insertRecordIntoSorter(new Tuple2<Object, Object>(1, 1));
+writer.insertRecordIntoSorter(new Tuple2<>(1, 1));
 newPeakMemory = writer.getPeakMemoryUsedBytes();
 if (i % numRecordsPerPage == 0) {
 // The first page is allocated in constructor, another page will be allocated after
@@ -550,7 +550,7 @@ public class UnsafeShuffleWriterSuite {
 newPeakMemory = writer.getPeakMemoryUsedBytes();
 assertEquals(previousPeakMemory, newPeakMemory);
 for (int i = 0; i < numRecordsPerPage; i++) {
-writer.insertRecordIntoSorter(new Tuple2<Object, Object>(1, 1));
+writer.insertRecordIntoSorter(new Tuple2<>(1, 1));
 }
 newPeakMemory = writer.getPeakMemoryUsedBytes();
 assertEquals(previousPeakMemory, newPeakMemory);

@@ -66,7 +66,7 @@ class AccumulatorSourceSuite extends SparkFunSuite {
 assert(gauges.get("my-accumulator-2").getValue() == 456)
 }
-test("the double accumulators value propety is checked when the gauge's value is requested") {
+test("the double accumulators value property is checked when the gauge's value is requested") {
 val acc1 = new DoubleAccumulator()
 acc1.add(123.123)
 val acc2 = new DoubleAccumulator()

@@ -70,7 +70,7 @@ public final class JavaStructuredSessionization {
 new FlatMapFunction<LineWithTimestamp, Event>() {
 @Override
 public Iterator<Event> call(LineWithTimestamp lineWithTimestamp) {
-ArrayList<Event> eventList = new ArrayList<Event>();
+ArrayList<Event> eventList = new ArrayList<>();
 for (String word : lineWithTimestamp.getLine().split(" ")) {
 eventList.add(new Event(word, lineWithTimestamp.getTimestamp()));
 }

@@ -42,7 +42,7 @@ public class JavaConsumerStrategySuite implements Serializable {
 final Collection<TopicPartition> parts = Arrays.asList(tp1, tp2);
 final scala.collection.Iterable<TopicPartition> sParts =
 JavaConverters.collectionAsScalaIterableConverter(parts).asScala();
-final Map<String, Object> kafkaParams = new HashMap<String, Object>();
+final Map<String, Object> kafkaParams = new HashMap<>();
 kafkaParams.put("bootstrap.servers", "not used");
 final scala.collection.Map<String, Object> sKafkaParams =
 JavaConverters.mapAsScalaMapConverter(kafkaParams).asScala();

@@ -152,7 +152,7 @@ public class JavaDirectKafkaStreamSuite implements Serializable {
 JavaDStream<String> unifiedStream = stream1.union(stream2);
-final Set<String> result = Collections.synchronizedSet(new HashSet<String>());
+final Set<String> result = Collections.synchronizedSet(new HashSet<>());
 unifiedStream.foreachRDD(new VoidFunction<JavaRDD<String>>() {
 @Override
 public void call(JavaRDD<String> rdd) {

@@ -37,7 +37,7 @@ public class CommandBuilderUtilsSuite {
 testOpt(" a b c \\\\ ", Arrays.asList("a", "b", "c", "\\"));
 // Following tests ported from UtilsSuite.scala.
-testOpt("", new ArrayList<String>());
+testOpt("", new ArrayList<>());
 testOpt("a", Arrays.asList("a"));
 testOpt("aaa", Arrays.asList("aaa"));
 testOpt("a b c", Arrays.asList("a", "b", "c"));

@@ -40,7 +40,7 @@ public class JavaSummarizerSuite extends SharedSparkSession {
 @Override
 public void setUp() throws IOException {
 super.setUp();
-List<LabeledPoint> points = new ArrayList<LabeledPoint>();
+List<LabeledPoint> points = new ArrayList<>();
 points.add(new LabeledPoint(0.0, Vectors.dense(1.0, 2.0)));
 points.add(new LabeledPoint(0.0, Vectors.dense(3.0, 4.0)));

@@ -57,7 +57,7 @@ public class JavaRidgeRegressionSuite extends SharedSparkSession {
 List<LabeledPoint> data = generateRidgeData(2 * numExamples, numFeatures, 10.0);
 JavaRDD<LabeledPoint> testRDD = jsc.parallelize(
-new ArrayList<LabeledPoint>(data.subList(0, numExamples)));
+new ArrayList<>(data.subList(0, numExamples)));
 List<LabeledPoint> validationData = data.subList(numExamples, 2 * numExamples);
 RidgeRegressionWithSGD ridgeSGDImpl = new RidgeRegressionWithSGD();
@@ -82,7 +82,7 @@ public class JavaRidgeRegressionSuite extends SharedSparkSession {
 List<LabeledPoint> data = generateRidgeData(2 * numExamples, numFeatures, 10.0);
 JavaRDD<LabeledPoint> testRDD = jsc.parallelize(
-new ArrayList<LabeledPoint>(data.subList(0, numExamples)));
+new ArrayList<>(data.subList(0, numExamples)));
 List<LabeledPoint> validationData = data.subList(numExamples, 2 * numExamples);
 RidgeRegressionModel model = RidgeRegressionWithSGD.train(testRDD.rdd(), 200, 1.0, 0.0);

@@ -251,7 +251,7 @@ case class WindowInPandasExec(
 }
 // Setting the window bounds argOffset for each UDF. For UDFs with bounded window, argOffset
-// for the UDF is (lowerBoundOffet, upperBoundOffset, inputOffset1, inputOffset2, ...)
+// for the UDF is (lowerBoundOffset, upperBoundOffset, inputOffset1, inputOffset2, ...)
 // For UDFs with unbounded window, argOffset is (inputOffset1, inputOffset2, ...)
 pyFuncs.indices.foreach { exprIndex =>
 val frameIndex = expressionIndexToFrameIndex(exprIndex)

@@ -62,7 +62,7 @@ public class JavaDataFrameReaderWriterSuite {
 @Test
 public void testOptionsAPI() {
-HashMap<String, String> map = new HashMap<String, String>();
+HashMap<String, String> map = new HashMap<>();
 map.put("e", "1");
 spark
 .read()

@@ -149,10 +149,10 @@ public class JavaMapWithStateSuite extends LocalJavaStreamingContext implements
 inputStream.map(x -> new Tuple2<>(x, 1))).mapWithState(mapWithStateSpec);
 List<Set<T>> collectedOutputs =
-Collections.synchronizedList(new ArrayList<Set<T>>());
+Collections.synchronizedList(new ArrayList<>());
 mapWithStateDStream.foreachRDD(rdd -> collectedOutputs.add(Sets.newHashSet(rdd.collect())));
 List<Set<Tuple2<K, S>>> collectedStateSnapshots =
-Collections.synchronizedList(new ArrayList<Set<Tuple2<K, S>>>());
+Collections.synchronizedList(new ArrayList<>());
 mapWithStateDStream.stateSnapshots().foreachRDD(rdd ->
 collectedStateSnapshots.add(Sets.newHashSet(rdd.collect())));
 BatchCounter batchCounter = new BatchCounter(ssc.ssc());