[MINOR][BUILD] Fix Java linter errors

## What changes were proposed in this pull request?

This PR cleans up a few Java linter errors for the Apache Spark 2.2 release.

## How was this patch tested?

```bash
$ dev/lint-java
Using `mvn` from path: /usr/local/bin/mvn
Checkstyle checks passed.
```

The result can also be checked at Travis CI: [here](https://travis-ci.org/dongjoon-hyun/spark/builds/244297894).

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #18345 from dongjoon-hyun/fix_lint_java_2.
Authored by Dongjoon Hyun on 2017-06-19 20:17:54 +01:00, committed by Sean Owen.
Commit: ecc5631351 (parent: e5387018e7)
14 changed files with 16 additions and 30 deletions.

```diff
@@ -50,7 +50,7 @@ import java.lang.annotation.Target;
 @Target({ElementType.FIELD, ElementType.METHOD})
 public @interface KVIndex {
 
-  public static final String NATURAL_INDEX_NAME = "__main__";
+  String NATURAL_INDEX_NAME = "__main__";
 
   /**
    * The name of the index to be created for the annotated entity. Must be unique within
```

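The hunk above is Checkstyle's RedundantModifier rule at work: every field declared in a Java interface or annotation type is implicitly `public static final`, so spelling the modifiers out adds nothing. A minimal sketch of the equivalence (the `Limits` interface is invented for illustration):

```java
// Hypothetical example: both declarations below have identical semantics,
// because interface fields are implicitly public, static, and final.
public interface Limits {
  // Would be flagged by Checkstyle's RedundantModifier rule:
  // public static final int MAX_RETRIES = 3;

  // Equivalent, lint-clean form:
  int MAX_RETRIES = 3;
}
```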
```diff
@@ -18,9 +18,6 @@
 package org.apache.spark.kvstore;
 
 import java.io.Closeable;
-import java.util.Iterator;
-import java.util.Map;
-import java.util.NoSuchElementException;
 
 /**
  * Abstraction for a local key/value store for storing app data.
@@ -84,7 +81,7 @@ public interface KVStore extends Closeable {
    *
    * @param naturalKey The object's "natural key", which uniquely identifies it. Null keys
    *                   are not allowed.
-   * @throws NoSuchElementException If an element with the given key does not exist.
+   * @throws java.util.NoSuchElementException If an element with the given key does not exist.
    */
   <T> T read(Class<T> klass, Object naturalKey) throws Exception;
@@ -107,7 +104,7 @@ public interface KVStore extends Closeable {
    * @param type The object's type.
    * @param naturalKey The object's "natural key", which uniquely identifies it. Null keys
    *                   are not allowed.
-   * @throws NoSuchElementException If an element with the given key does not exist.
+   * @throws java.util.NoSuchElementException If an element with the given key does not exist.
    */
   void delete(Class<?> type, Object naturalKey) throws Exception;
```

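Qualifying the exception as `java.util.NoSuchElementException` directly in the `@throws` tag lets the import go away entirely; an import referenced only from Javadoc would otherwise be reported as unused by the linter. A sketch of the pattern, with invented names:

```java
import java.io.Closeable;

// Hypothetical store interface. Note there is no import for
// NoSuchElementException: the only reference to it lives in Javadoc,
// so the fully qualified name keeps the import list lint-clean.
public interface SimpleStore extends Closeable {

  /**
   * Reads a previously written value.
   *
   * @param key The entry's key. Null keys are not allowed.
   * @throws java.util.NoSuchElementException If no entry with the given key exists.
   */
  String read(String key) throws Exception;
}
```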
```diff
@@ -17,9 +17,6 @@
 package org.apache.spark.kvstore;
 
-import java.util.Iterator;
-import java.util.Map;
-
 import com.google.common.base.Preconditions;
 
 /**
```

```diff
@@ -19,8 +19,6 @@ package org.apache.spark.kvstore;
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;
-import java.util.ArrayList;
-import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.stream.Stream;
```

```diff
@@ -29,7 +29,6 @@ import java.util.concurrent.atomic.AtomicReference;
 import static java.nio.charset.StandardCharsets.UTF_8;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Objects;
-import com.google.common.base.Preconditions;
 import com.google.common.base.Throwables;
 import org.fusesource.leveldbjni.JniDBFactory;
```

```diff
@@ -18,7 +18,6 @@
 package org.apache.spark.kvstore;
 
 import java.io.IOException;
-import java.util.Arrays;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
```

```diff
@@ -18,17 +18,12 @@
 package org.apache.spark.kvstore;
 
 import java.lang.reflect.Array;
-import java.lang.reflect.Field;
-import java.lang.reflect.Method;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.Map;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 
 import com.google.common.base.Preconditions;
-import com.google.common.base.Throwables;
 import org.iq80.leveldb.WriteBatch;
 
 /**
```

```diff
@@ -25,11 +25,9 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Random;
 
-import com.google.common.base.Predicate;
-import com.google.common.collect.Iterables;
 import com.google.common.collect.Iterators;
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import org.junit.AfterClass;
 import org.junit.Before;
 import org.junit.BeforeClass;
@@ -50,7 +48,7 @@ public abstract class DBIteratorSuite {
   private static List<CustomType1> clashingEntries;
   private static KVStore db;
 
-  private static interface BaseComparator extends Comparator<CustomType1> {
+  private interface BaseComparator extends Comparator<CustomType1> {
 
     /**
      * Returns a comparator that falls back to natural order if this comparator's ordering
      * returns equality for two elements. Used to mimic how the index sorts things internally.
```

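`private static interface` shrinks to `private interface` for the same reason as the annotation-field fix: a member interface is implicitly static, so the keyword is redundant and RedundantModifier flags it. A small sketch with invented names, mirroring the fallback-comparator shape used in the suite:

```java
import java.util.Comparator;

public abstract class SorterSuiteSketch {
  // A nested interface is implicitly static, so writing
  // "private static interface" says the same thing with one extra keyword.
  private interface BaseComparator extends Comparator<Integer> {
    // Default method so the sketch is self-contained: falls back to a
    // second comparator when this one reports equality.
    default BaseComparator fallback(Comparator<Integer> other) {
      return (a, b) -> {
        int diff = compare(a, b);
        return diff != 0 ? diff : other.compare(a, b);
      };
    }
  }
}
```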
```diff
@@ -20,9 +20,7 @@ package org.apache.spark.kvstore;
 import java.io.File;
 import java.util.Arrays;
 import java.util.List;
-import java.util.Map;
-import java.util.NoSuchElementException;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import org.apache.commons.io.FileUtils;
 import org.iq80.leveldb.DBIterator;
```

```diff
@@ -157,7 +157,7 @@ public class OneForOneBlockFetcher {
     private File targetFile = null;
     private int chunkIndex;
 
-    public DownloadCallback(File targetFile, int chunkIndex) throws IOException {
+    DownloadCallback(File targetFile, int chunkIndex) throws IOException {
       this.targetFile = targetFile;
       this.channel = Channels.newChannel(new FileOutputStream(targetFile));
       this.chunkIndex = chunkIndex;
```

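Dropping `public` from `DownloadCallback`'s constructor follows the same logic: the enclosing class is the only possible caller of a private inner class's constructor, so the modifier cannot widen access to anything. A hedged sketch (all names invented):

```java
// Hypothetical fetcher: because Callback is a private inner class, a
// public constructor would grant no extra access, so the plain
// package-private form is the honest (and lint-clean) declaration.
public class FetcherSketch {

  private class Callback {
    private final int chunkIndex;

    // No redundant "public" modifier here.
    Callback(int chunkIndex) {
      this.chunkIndex = chunkIndex;
    }
  }

  Callback callbackFor(int chunkIndex) {
    return new Callback(chunkIndex);
  }
}
```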
```diff
@@ -364,7 +364,8 @@ public class UnsafeShuffleWriter<K, V> extends ShuffleWriter<K, V> {
     // Use a counting output stream to avoid having to close the underlying file and ask
     // the file system for its size after each partition is written.
     final CountingOutputStream mergedFileOutputStream = new CountingOutputStream(bos);
-    final int inputBufferSizeInBytes = (int) sparkConf.getSizeAsKb("spark.shuffle.file.buffer", "32k") * 1024;
+    final int inputBufferSizeInBytes =
+      (int) sparkConf.getSizeAsKb("spark.shuffle.file.buffer", "32k") * 1024;
 
     boolean threwException = true;
     try {
@@ -375,8 +376,9 @@ public class UnsafeShuffleWriter<K, V> extends ShuffleWriter<K, V> {
       }
       for (int partition = 0; partition < numPartitions; partition++) {
         final long initialFileLength = mergedFileOutputStream.getByteCount();
-        // Shield the underlying output stream from close() and flush() calls, so that we can close the higher
-        // level streams to make sure all data is really flushed and internal state is cleaned.
+        // Shield the underlying output stream from close() and flush() calls, so that we can close
+        // the higher level streams to make sure all data is really flushed and internal state is
+        // cleaned.
         OutputStream partitionOutput = new CloseAndFlushShieldOutputStream(
           new TimeTrackingOutputStream(writeMetrics, mergedFileOutputStream));
         partitionOutput = blockManager.serializerManager().wrapForEncryption(partitionOutput);
```

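Both `UnsafeShuffleWriter` hunks are pure line-length fixes: Spark's Checkstyle configuration caps Java lines at 100 characters, and the usual remedies are breaking after the assignment operator and re-flowing a long comment. A sketch of both wrap styles under that assumption (the config key and sizes here are only illustrative):

```java
import java.util.Map;

public class WrapSketch {
  // Re-flowed comment: long explanatory comments are broken so that no
  // single line crosses the 100-character limit.
  static int bufferSizeBytes(Map<String, String> conf) {
    // Break after '=' and indent the continuation, instead of one long line.
    final int inputBufferSizeInBytes =
        Integer.parseInt(conf.getOrDefault("example.file.buffer.kb", "32")) * 1024;
    return inputBufferSizeInBytes;
  }
}
```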
```diff
@@ -121,7 +121,7 @@ public class JavaALSExample {
     // $example off$
     userRecs.show();
     movieRecs.show();
-    
+
     spark.stop();
   }
 }
```

```diff
@@ -124,7 +124,11 @@ public class JavaSQLDataSourceExample {
     peopleDF.write().bucketBy(42, "name").sortBy("age").saveAsTable("people_bucketed");
     // $example off:write_sorting_and_bucketing$
     // $example on:write_partitioning$
-    usersDF.write().partitionBy("favorite_color").format("parquet").save("namesPartByColor.parquet");
+    usersDF
+      .write()
+      .partitionBy("favorite_color")
+      .format("parquet")
+      .save("namesPartByColor.parquet");
     // $example off:write_partitioning$
     // $example on:write_partition_and_bucket$
     peopleDF
```

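For fluent builder chains, the same 100-character limit is usually satisfied by putting one call per line, which also keeps future additions diff-friendly. A short sketch with a hypothetical chain:

```java
// Hypothetical builder chain, wrapped one call per line so the statement
// stays inside the line-length limit no matter how many steps it gains.
public class ChainSketch {
  public static void main(String[] args) {
    String out = new StringBuilder()
        .append("favorite_color=")
        .append("red")
        .append(", format=")
        .append("parquet")
        .toString();
    System.out.println(out);
  }
}
```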
```diff
@@ -17,7 +17,6 @@
 package org.apache.spark.sql.streaming;
 
-import org.apache.spark.annotation.Experimental;
 import org.apache.spark.annotation.InterfaceStability;
 import org.apache.spark.sql.catalyst.streaming.InternalOutputModes;
```