[MINOR] Fix Java style errors and remove unused imports

## What changes were proposed in this pull request?

Fix a few Java style errors and remove unused imports that were found incidentally while reading the code.

## How was this patch tested?

Tested on my local machine.

Author: Xin Ren <iamshrek@126.com>

Closes #14161 from keypointt/SPARK-16437.
Authored by Xin Ren on 2016-07-13 10:47:07 +01:00; committed by Sean Owen.
Commit f73891e0b9, parent f156136dae.
4 changed files with 4 additions and 7 deletions.


@@ -24,7 +24,6 @@ import java.util.LinkedList;
import java.util.Map;
import org.apache.spark.unsafe.Platform;
import org.apache.spark.unsafe.memory.MemoryAllocator;
/**
* A simple {@link MemoryAllocator} that can allocate up to 16GB using a JVM long primitive array.


@@ -23,12 +23,12 @@ public interface MemoryAllocator {
    * Whether to fill newly allocated and deallocated memory with 0xa5 and 0x5a bytes respectively.
    * This helps catch misuse of uninitialized or freed memory, but imposes some overhead.
    */
-  public static final boolean MEMORY_DEBUG_FILL_ENABLED = Boolean.parseBoolean(
+  boolean MEMORY_DEBUG_FILL_ENABLED = Boolean.parseBoolean(
       System.getProperty("spark.memory.debugFill", "false"));

   // Same as jemalloc's debug fill values.
-  public static final byte MEMORY_DEBUG_FILL_CLEAN_VALUE = (byte)0xa5;
-  public static final byte MEMORY_DEBUG_FILL_FREED_VALUE = (byte)0x5a;
+  byte MEMORY_DEBUG_FILL_CLEAN_VALUE = (byte)0xa5;
+  byte MEMORY_DEBUG_FILL_FREED_VALUE = (byte)0x5a;

   /**
    * Allocates a contiguous block of memory. Note that the allocated memory is not guaranteed
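
In a Java interface every field is implicitly `public`, `static`, and `final`, so the explicit modifiers removed above are redundant (Checkstyle's `RedundantModifier` check, for example, flags them). A minimal sketch, not taken from the patch, with a hypothetical interface name:

```java
// Hypothetical interface, for illustration only: the field modifiers
// below are implied by the language, so writing them out is redundant.
public interface DebugFillConfig {
  // Equivalent to: public static final boolean FILL_ENABLED = ...
  boolean FILL_ENABLED = Boolean.parseBoolean(
      System.getProperty("example.debugFill", "false"));

  // Equivalent to: public static final byte CLEAN_VALUE = (byte) 0xa5;
  byte CLEAN_VALUE = (byte) 0xa5;
}
```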


@@ -780,8 +780,7 @@ private[sql] object ParquetFileFormat extends Logging {
     val assumeBinaryIsString = sparkSession.sessionState.conf.isParquetBinaryAsString
     val assumeInt96IsTimestamp = sparkSession.sessionState.conf.isParquetINT96AsTimestamp
     val writeLegacyParquetFormat = sparkSession.sessionState.conf.writeLegacyParquetFormat
-    val serializedConf =
-      new SerializableConfiguration(sparkSession.sessionState.newHadoopConf())
+    val serializedConf = new SerializableConfiguration(sparkSession.sessionState.newHadoopConf())

     // !! HACK ALERT !!
     //


@@ -18,7 +18,6 @@
package org.apache.spark.sql.sources
import org.apache.spark.sql._
import org.apache.spark.sql.internal.SQLConf
private[sql] abstract class DataSourceTest extends QueryTest {
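
For reference, a minimal hypothetical example of the kind of unused import this patch removes; the class and import names are illustrative only and not from the Spark sources:

```java
import java.util.ArrayList;
import java.util.List;  // unused: nothing below refers to List, so a linter
                        // (for example Checkstyle's UnusedImports check) would flag it

public class Example {
  private final ArrayList<String> names = new ArrayList<>();

  public int size() {
    return names.size();
  }
}
```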