[SPARK-13702][CORE][SQL][MLLIB] Use diamond operator for generic instance creation in Java code.

## What changes were proposed in this pull request?

To make `docs/examples` (and other related code) simpler, more readable, and more user-friendly, this PR replaces code like the following with the `diamond` operator.

```
-    final ArrayList<Product2<Object, Object>> dataToWrite =
-      new ArrayList<Product2<Object, Object>>();
+    final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
```

Java 7 and higher support the **diamond** operator, which replaces the type arguments required to invoke the constructor of a generic class with an empty set of type parameters (`<>`). Currently, the Spark Java code mixes both styles.
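
As an illustration only (this standalone class is hypothetical and not part of the Spark sources), the two styles are equivalent; with the diamond operator the compiler infers the omitted type arguments from the declared type on the left-hand side:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DiamondOperatorExample {
  public static void main(String[] args) {
    // Pre-Java-7 style: the type arguments are spelled out again on the right-hand side.
    Map<String, List<Integer>> verbose = new HashMap<String, List<Integer>>();

    // Java 7+ diamond operator: the right-hand side uses an empty set of type parameters,
    // and the compiler infers <String, List<Integer>> from the declared type on the left.
    Map<String, List<Integer>> concise = new HashMap<>();

    List<Integer> counts = new ArrayList<>(); // inferred as ArrayList<Integer>
    counts.add(42);
    verbose.put("counts", counts);
    concise.put("counts", counts);

    System.out.println(verbose.equals(concise)); // prints: true
  }
}
```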

## How was this patch tested?

Manual.
Passes the existing tests.

Author: Dongjoon Hyun <dongjoon@apache.org>

Closes #11541 from dongjoon-hyun/SPARK-13702.
Authored by Dongjoon Hyun on 2016-03-09 10:31:26 +00:00; committed by Sean Owen
parent cbff2803ef
commit c3689bc24e
59 changed files with 129 additions and 134 deletions


@ -194,8 +194,8 @@ public class TransportClientFactory implements Closeable {
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, conf.connectionTimeoutMs())
.option(ChannelOption.ALLOCATOR, pooledAllocator);
final AtomicReference<TransportClient> clientRef = new AtomicReference<TransportClient>();
final AtomicReference<Channel> channelRef = new AtomicReference<Channel>();
final AtomicReference<TransportClient> clientRef = new AtomicReference<>();
final AtomicReference<Channel> channelRef = new AtomicReference<>();
bootstrap.handler(new ChannelInitializer<SocketChannel>() {
@Override


@ -84,9 +84,9 @@ final class ShuffleExternalSorter extends MemoryConsumer {
* this might not be necessary if we maintained a pool of re-usable pages in the TaskMemoryManager
* itself).
*/
private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<MemoryBlock>();
private final LinkedList<MemoryBlock> allocatedPages = new LinkedList<>();
private final LinkedList<SpillInfo> spills = new LinkedList<SpillInfo>();
private final LinkedList<SpillInfo> spills = new LinkedList<>();
/** Peak memory used by this sorter so far, in bytes. **/
private long peakMemoryUsedBytes;


@ -29,7 +29,7 @@ public enum TaskSorting {
private final Set<String> alternateNames;
private TaskSorting(String... names) {
alternateNames = new HashSet<String>();
alternateNames = new HashSet<>();
for (String n: names) {
alternateNames.add(n);
}


@ -88,7 +88,7 @@ public class SparkLauncherSuite {
@Test
public void testChildProcLauncher() throws Exception {
SparkSubmitOptionParser opts = new SparkSubmitOptionParser();
Map<String, String> env = new HashMap<String, String>();
Map<String, String> env = new HashMap<>();
env.put("SPARK_PRINT_LAUNCH_COMMAND", "1");
SparkLauncher launcher = new SparkLauncher(env)


@ -67,7 +67,7 @@ public class UnsafeShuffleWriterSuite {
File mergedOutputFile;
File tempDir;
long[] partitionSizesInMergedFile;
final LinkedList<File> spillFilesCreated = new LinkedList<File>();
final LinkedList<File> spillFilesCreated = new LinkedList<>();
SparkConf conf;
final Serializer serializer = new KryoSerializer(new SparkConf());
TaskMetrics taskMetrics;
@ -217,7 +217,7 @@ public class UnsafeShuffleWriterSuite {
}
private List<Tuple2<Object, Object>> readRecordsFromFile() throws IOException {
final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<Tuple2<Object, Object>>();
final ArrayList<Tuple2<Object, Object>> recordsList = new ArrayList<>();
long startOffset = 0;
for (int i = 0; i < NUM_PARTITITONS; i++) {
final long partitionSize = partitionSizesInMergedFile[i];
@ -286,8 +286,7 @@ public class UnsafeShuffleWriterSuite {
@Test
public void writeWithoutSpilling() throws Exception {
// In this example, each partition should have exactly one record:
final ArrayList<Product2<Object, Object>> dataToWrite =
new ArrayList<Product2<Object, Object>>();
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
for (int i = 0; i < NUM_PARTITITONS; i++) {
dataToWrite.add(new Tuple2<Object, Object>(i, i));
}
@ -325,8 +324,7 @@ public class UnsafeShuffleWriterSuite {
conf.set("spark.shuffle.compress", "false");
}
final UnsafeShuffleWriter<Object, Object> writer = createWriter(transferToEnabled);
final ArrayList<Product2<Object, Object>> dataToWrite =
new ArrayList<Product2<Object, Object>>();
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
for (int i : new int[] { 1, 2, 3, 4, 4, 2 }) {
dataToWrite.add(new Tuple2<Object, Object>(i, i));
}
@ -403,7 +401,7 @@ public class UnsafeShuffleWriterSuite {
public void writeEnoughDataToTriggerSpill() throws Exception {
memoryManager.limit(PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES);
final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<Product2<Object, Object>>();
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
final byte[] bigByteArray = new byte[PackedRecordPointer.MAXIMUM_PAGE_SIZE_BYTES / 10];
for (int i = 0; i < 10 + 1; i++) {
dataToWrite.add(new Tuple2<Object, Object>(i, bigByteArray));
@ -445,8 +443,7 @@ public class UnsafeShuffleWriterSuite {
@Test
public void writeRecordsThatAreBiggerThanDiskWriteBufferSize() throws Exception {
final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
final ArrayList<Product2<Object, Object>> dataToWrite =
new ArrayList<Product2<Object, Object>>();
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
final byte[] bytes = new byte[(int) (ShuffleExternalSorter.DISK_WRITE_BUFFER_SIZE * 2.5)];
new Random(42).nextBytes(bytes);
dataToWrite.add(new Tuple2<Object, Object>(1, ByteBuffer.wrap(bytes)));
@ -461,7 +458,7 @@ public class UnsafeShuffleWriterSuite {
@Test
public void writeRecordsThatAreBiggerThanMaxRecordSize() throws Exception {
final UnsafeShuffleWriter<Object, Object> writer = createWriter(false);
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<Product2<Object, Object>>();
final ArrayList<Product2<Object, Object>> dataToWrite = new ArrayList<>();
dataToWrite.add(new Tuple2<Object, Object>(1, ByteBuffer.wrap(new byte[1])));
// We should be able to write a record that's right _at_ the max record size
final byte[] atMaxRecordSize = new byte[(int) taskMemoryManager.pageSizeBytes() - 4];
@ -498,7 +495,7 @@ public class UnsafeShuffleWriterSuite {
taskMemoryManager = spy(taskMemoryManager);
when(taskMemoryManager.pageSizeBytes()).thenReturn(pageSizeBytes);
final UnsafeShuffleWriter<Object, Object> writer =
new UnsafeShuffleWriter<Object, Object>(
new UnsafeShuffleWriter<>(
blockManager,
shuffleBlockResolver,
taskMemoryManager,


@ -66,7 +66,7 @@ public abstract class AbstractBytesToBytesMapSuite {
private TaskMemoryManager taskMemoryManager;
private static final long PAGE_SIZE_BYTES = 1L << 26; // 64 megabytes
final LinkedList<File> spillFilesCreated = new LinkedList<File>();
final LinkedList<File> spillFilesCreated = new LinkedList<>();
File tempDir;
@Mock(answer = RETURNS_SMART_NULLS) BlockManager blockManager;
@ -397,7 +397,7 @@ public abstract class AbstractBytesToBytesMapSuite {
final int size = 65536;
// Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
// into ByteBuffers in order to use them as keys here.
final Map<ByteBuffer, byte[]> expected = new HashMap<ByteBuffer, byte[]>();
final Map<ByteBuffer, byte[]> expected = new HashMap<>();
final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, size, PAGE_SIZE_BYTES);
try {
// Fill the map to 90% full so that we can trigger probing
@ -453,7 +453,7 @@ public abstract class AbstractBytesToBytesMapSuite {
final BytesToBytesMap map = new BytesToBytesMap(taskMemoryManager, 64, pageSizeBytes);
// Java arrays' hashCodes() aren't based on the arrays' contents, so we need to wrap arrays
// into ByteBuffers in order to use them as keys here.
final Map<ByteBuffer, byte[]> expected = new HashMap<ByteBuffer, byte[]>();
final Map<ByteBuffer, byte[]> expected = new HashMap<>();
try {
for (int i = 0; i < 1000; i++) {
final byte[] key = getRandomByteArray(rand.nextInt(128));


@ -76,7 +76,7 @@ public class TestTimSort {
* @param length The sum of all run lengths that will be added to <code>runs</code>.
*/
private static List<Long> runsJDKWorstCase(int minRun, int length) {
List<Long> runs = new ArrayList<Long>();
List<Long> runs = new ArrayList<>();
long runningTotal = 0, Y = minRun + 4, X = minRun;


@ -56,7 +56,7 @@ import static org.mockito.Mockito.*;
public class UnsafeExternalSorterSuite {
final LinkedList<File> spillFilesCreated = new LinkedList<File>();
final LinkedList<File> spillFilesCreated = new LinkedList<>();
final TestMemoryManager memoryManager =
new TestMemoryManager(new SparkConf().set("spark.memory.offHeap.enabled", "false"));
final TaskMemoryManager taskMemoryManager = new TaskMemoryManager(memoryManager, 0);


@ -760,7 +760,7 @@ JavaRDD<String> people = sc.textFile("examples/src/main/resources/people.txt");
String schemaString = "name age";
// Generate the schema based on the string of schema
List<StructField> fields = new ArrayList<StructField>();
List<StructField> fields = new ArrayList<>();
for (String fieldName: schemaString.split(" ")) {
fields.add(DataTypes.createStructField(fieldName, DataTypes.StringType, true));
}
@ -1935,7 +1935,7 @@ val jdbcDF = sqlContext.read.format("jdbc").options(
{% highlight java %}
Map<String, String> options = new HashMap<String, String>();
Map<String, String> options = new HashMap<>();
options.put("url", "jdbc:postgresql:dbserver");
options.put("dbtable", "schema.tablename");


@ -186,7 +186,7 @@ Next, we want to count these words.
JavaPairDStream<String, Integer> pairs = words.mapToPair(
new PairFunction<String, String, Integer>() {
@Override public Tuple2<String, Integer> call(String s) {
return new Tuple2<String, Integer>(s, 1);
return new Tuple2<>(s, 1);
}
});
JavaPairDStream<String, Integer> wordCounts = pairs.reduceByKey(
@ -2095,7 +2095,7 @@ unifiedStream.print()
<div data-lang="java" markdown="1">
{% highlight java %}
int numStreams = 5;
List<JavaPairDStream<String, String>> kafkaStreams = new ArrayList<JavaPairDStream<String, String>>(numStreams);
List<JavaPairDStream<String, String>> kafkaStreams = new ArrayList<>(numStreams);
for (int i = 0; i < numStreams; i++) {
kafkaStreams.add(KafkaUtils.createStream(...));
}


@ -108,7 +108,7 @@ public final class JavaLogQuery {
JavaPairRDD<Tuple3<String, String, String>, Stats> extracted = dataSet.mapToPair(new PairFunction<String, Tuple3<String, String, String>, Stats>() {
@Override
public Tuple2<Tuple3<String, String, String>, Stats> call(String s) {
return new Tuple2<Tuple3<String, String, String>, Stats>(extractKey(s), extractStats(s));
return new Tuple2<>(extractKey(s), extractStats(s));
}
});


@ -88,7 +88,7 @@ public final class JavaPageRank {
@Override
public Tuple2<String, String> call(String s) {
String[] parts = SPACES.split(s);
return new Tuple2<String, String>(parts[0], parts[1]);
return new Tuple2<>(parts[0], parts[1]);
}
}).distinct().groupByKey().cache();


@ -38,7 +38,7 @@ public final class JavaSparkPi {
int slices = (args.length == 1) ? Integer.parseInt(args[0]) : 2;
int n = 100000 * slices;
List<Integer> l = new ArrayList<Integer>(n);
List<Integer> l = new ArrayList<>(n);
for (int i = 0; i < n; i++) {
l.add(i);
}


@ -41,16 +41,16 @@ public final class JavaTC {
private static final Random rand = new Random(42);
static List<Tuple2<Integer, Integer>> generateGraph() {
Set<Tuple2<Integer, Integer>> edges = new HashSet<Tuple2<Integer, Integer>>(numEdges);
Set<Tuple2<Integer, Integer>> edges = new HashSet<>(numEdges);
while (edges.size() < numEdges) {
int from = rand.nextInt(numVertices);
int to = rand.nextInt(numVertices);
Tuple2<Integer, Integer> e = new Tuple2<Integer, Integer>(from, to);
Tuple2<Integer, Integer> e = new Tuple2<>(from, to);
if (from != to) {
edges.add(e);
}
}
return new ArrayList<Tuple2<Integer, Integer>>(edges);
return new ArrayList<>(edges);
}
static class ProjectFn implements PairFunction<Tuple2<Integer, Tuple2<Integer, Integer>>,
@ -59,7 +59,7 @@ public final class JavaTC {
@Override
public Tuple2<Integer, Integer> call(Tuple2<Integer, Tuple2<Integer, Integer>> triple) {
return new Tuple2<Integer, Integer>(triple._2()._2(), triple._2()._1());
return new Tuple2<>(triple._2()._2(), triple._2()._1());
}
}
@ -79,7 +79,7 @@ public final class JavaTC {
new PairFunction<Tuple2<Integer, Integer>, Integer, Integer>() {
@Override
public Tuple2<Integer, Integer> call(Tuple2<Integer, Integer> e) {
return new Tuple2<Integer, Integer>(e._2(), e._1());
return new Tuple2<>(e._2(), e._1());
}
});


@ -55,7 +55,7 @@ public final class JavaWordCount {
JavaPairRDD<String, Integer> ones = words.mapToPair(new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) {
return new Tuple2<String, Integer>(s, 1);
return new Tuple2<>(s, 1);
}
});


@ -52,7 +52,7 @@ public class JavaElementwiseProductExample {
RowFactory.create("b", Vectors.dense(4.0, 5.0, 6.0))
));
List<StructField> fields = new ArrayList<StructField>(2);
List<StructField> fields = new ArrayList<>(2);
fields.add(DataTypes.createStructField("id", DataTypes.StringType, false));
fields.add(DataTypes.createStructField("vector", new VectorUDT(), false));


@ -54,7 +54,7 @@ class JavaDecisionTreeClassificationExample {
// Set parameters.
// Empty categoricalFeaturesInfo indicates all features are continuous.
Integer numClasses = 2;
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
String impurity = "gini";
Integer maxDepth = 5;
Integer maxBins = 32;
@ -68,7 +68,7 @@ class JavaDecisionTreeClassificationExample {
testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
Double testErr =


@ -54,7 +54,7 @@ class JavaDecisionTreeRegressionExample {
// Set parameters.
// Empty categoricalFeaturesInfo indicates all features are continuous.
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
String impurity = "variance";
Integer maxDepth = 5;
Integer maxBins = 32;
@ -68,7 +68,7 @@ class JavaDecisionTreeRegressionExample {
testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
Double testMSE =


@ -58,7 +58,7 @@ public class JavaGradientBoostingClassificationExample {
boostingStrategy.getTreeStrategy().setNumClasses(2);
boostingStrategy.getTreeStrategy().setMaxDepth(5);
// Empty categoricalFeaturesInfo indicates all features are continuous.
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo);
final GradientBoostedTreesModel model =
@ -69,7 +69,7 @@ public class JavaGradientBoostingClassificationExample {
testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
Double testErr =


@ -57,7 +57,7 @@ public class JavaGradientBoostingRegressionExample {
boostingStrategy.setNumIterations(3); // Note: Use more iterations in practice.
boostingStrategy.getTreeStrategy().setMaxDepth(5);
// Empty categoricalFeaturesInfo indicates all features are continuous.
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
boostingStrategy.treeStrategy().setCategoricalFeaturesInfo(categoricalFeaturesInfo);
final GradientBoostedTreesModel model =
@ -68,7 +68,7 @@ public class JavaGradientBoostingRegressionExample {
testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
Double testMSE =


@ -62,7 +62,7 @@ public class JavaIsotonicRegressionExample {
@Override
public Tuple2<Double, Double> call(Tuple3<Double, Double, Double> point) {
Double predictedLabel = model.predict(point._2());
return new Tuple2<Double, Double>(predictedLabel, point._1());
return new Tuple2<>(predictedLabel, point._1());
}
}
);


@ -70,7 +70,7 @@ public class JavaLinearRegressionWithSGDExample {
new Function<LabeledPoint, Tuple2<Double, Double>>() {
public Tuple2<Double, Double> call(LabeledPoint point) {
double prediction = model.predict(point.features());
return new Tuple2<Double, Double>(prediction, point.label());
return new Tuple2<>(prediction, point.label());
}
}
);


@ -46,7 +46,7 @@ public class JavaNaiveBayesExample {
test.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
double accuracy = predictionAndLabel.filter(new Function<Tuple2<Double, Double>, Boolean>() {


@ -42,7 +42,7 @@ public class JavaPCAExample {
// $example on$
double[][] array = {{1.12, 2.05, 3.12}, {5.56, 6.28, 8.94}, {10.2, 8.0, 20.5}};
LinkedList<Vector> rowsList = new LinkedList<Vector>();
LinkedList<Vector> rowsList = new LinkedList<>();
for (int i = 0; i < array.length; i++) {
Vector currentRow = Vectors.dense(array[i]);
rowsList.add(currentRow);


@ -50,7 +50,7 @@ public class JavaRandomForestClassificationExample {
// Train a RandomForest model.
// Empty categoricalFeaturesInfo indicates all features are continuous.
Integer numClasses = 2;
HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
Integer numTrees = 3; // Use more in practice.
String featureSubsetStrategy = "auto"; // Let the algorithm choose.
String impurity = "gini";
@ -67,7 +67,7 @@ public class JavaRandomForestClassificationExample {
testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
Double testErr =


@ -51,7 +51,7 @@ public class JavaRandomForestRegressionExample {
// Set parameters.
// Empty categoricalFeaturesInfo indicates all features are continuous.
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
Integer numTrees = 3; // Use more in practice.
String featureSubsetStrategy = "auto"; // Let the algorithm choose.
String impurity = "variance";
@ -67,7 +67,7 @@ public class JavaRandomForestRegressionExample {
testData.mapToPair(new PairFunction<LabeledPoint, Double, Double>() {
@Override
public Tuple2<Double, Double> call(LabeledPoint p) {
return new Tuple2<Double, Double>(model.predict(p.features()), p.label());
return new Tuple2<>(model.predict(p.features()), p.label());
}
});
Double testMSE =


@ -64,8 +64,7 @@ public class JavaRecommendationExample {
model.predict(JavaRDD.toRDD(userProducts)).toJavaRDD().map(
new Function<Rating, Tuple2<Tuple2<Integer, Integer>, Double>>() {
public Tuple2<Tuple2<Integer, Integer>, Double> call(Rating r){
return new Tuple2<Tuple2<Integer, Integer>, Double>(
new Tuple2<Integer, Integer>(r.user(), r.product()), r.rating());
return new Tuple2<>(new Tuple2<>(r.user(), r.product()), r.rating());
}
}
));
@ -73,8 +72,7 @@ public class JavaRecommendationExample {
JavaPairRDD.fromJavaRDD(ratings.map(
new Function<Rating, Tuple2<Tuple2<Integer, Integer>, Double>>() {
public Tuple2<Tuple2<Integer, Integer>, Double> call(Rating r){
return new Tuple2<Tuple2<Integer, Integer>, Double>(
new Tuple2<Integer, Integer>(r.user(), r.product()), r.rating());
return new Tuple2<>(new Tuple2<>(r.user(), r.product()), r.rating());
}
}
)).join(predictions).values();


@ -44,7 +44,7 @@ public class JavaSVDExample {
// $example on$
double[][] array = {{1.12, 2.05, 3.12}, {5.56, 6.28, 8.94}, {10.2, 8.0, 20.5}};
LinkedList<Vector> rowsList = new LinkedList<Vector>();
LinkedList<Vector> rowsList = new LinkedList<>();
for (int i = 0; i < array.length; i++) {
Vector currentRow = Vectors.dense(array[i]);
rowsList.add(currentRow);


@ -129,7 +129,7 @@ public class JavaActorWordCount {
}).mapToPair(new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) {
return new Tuple2<String, Integer>(s, 1);
return new Tuple2<>(s, 1);
}
}).reduceByKey(new Function2<Integer, Integer, Integer>() {
@Override


@ -82,7 +82,7 @@ public class JavaCustomReceiver extends Receiver<String> {
JavaPairDStream<String, Integer> wordCounts = words.mapToPair(
new PairFunction<String, String, Integer>() {
@Override public Tuple2<String, Integer> call(String s) {
return new Tuple2<String, Integer>(s, 1);
return new Tuple2<>(s, 1);
}
}).reduceByKey(new Function2<Integer, Integer, Integer>() {
@Override


@ -63,8 +63,8 @@ public final class JavaDirectKafkaWordCount {
SparkConf sparkConf = new SparkConf().setAppName("JavaDirectKafkaWordCount");
JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, Durations.seconds(2));
HashSet<String> topicsSet = new HashSet<String>(Arrays.asList(topics.split(",")));
HashMap<String, String> kafkaParams = new HashMap<String, String>();
HashSet<String> topicsSet = new HashSet<>(Arrays.asList(topics.split(",")));
HashMap<String, String> kafkaParams = new HashMap<>();
kafkaParams.put("metadata.broker.list", brokers);
// Create direct kafka stream with brokers and topics
@ -95,7 +95,7 @@ public final class JavaDirectKafkaWordCount {
new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) {
return new Tuple2<String, Integer>(s, 1);
return new Tuple2<>(s, 1);
}
}).reduceByKey(
new Function2<Integer, Integer, Integer>() {


@ -69,7 +69,7 @@ public final class JavaKafkaWordCount {
JavaStreamingContext jssc = new JavaStreamingContext(sparkConf, new Duration(2000));
int numThreads = Integer.parseInt(args[3]);
Map<String, Integer> topicMap = new HashMap<String, Integer>();
Map<String, Integer> topicMap = new HashMap<>();
String[] topics = args[2].split(",");
for (String topic: topics) {
topicMap.put(topic, numThreads);
@ -96,7 +96,7 @@ public final class JavaKafkaWordCount {
new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) {
return new Tuple2<String, Integer>(s, 1);
return new Tuple2<>(s, 1);
}
}).reduceByKey(new Function2<Integer, Integer, Integer>() {
@Override


@ -76,7 +76,7 @@ public final class JavaNetworkWordCount {
new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) {
return new Tuple2<String, Integer>(s, 1);
return new Tuple2<>(s, 1);
}
}).reduceByKey(new Function2<Integer, Integer, Integer>() {
@Override


@ -50,7 +50,7 @@ public final class JavaQueueStream {
// Create the queue through which RDDs can be pushed to
// a QueueInputDStream
Queue<JavaRDD<Integer>> rddQueue = new LinkedList<JavaRDD<Integer>>();
Queue<JavaRDD<Integer>> rddQueue = new LinkedList<>();
// Create and push some RDDs into the queue
List<Integer> list = Lists.newArrayList();
@ -68,7 +68,7 @@ public final class JavaQueueStream {
new PairFunction<Integer, Integer, Integer>() {
@Override
public Tuple2<Integer, Integer> call(Integer i) {
return new Tuple2<Integer, Integer>(i % 10, 1);
return new Tuple2<>(i % 10, 1);
}
});
JavaPairDStream<Integer, Integer> reducedStream = mappedStream.reduceByKey(


@ -142,7 +142,7 @@ public final class JavaRecoverableNetworkWordCount {
new PairFunction<String, String, Integer>() {
@Override
public Tuple2<String, Integer> call(String s) {
return new Tuple2<String, Integer>(s, 1);
return new Tuple2<>(s, 1);
}
}).reduceByKey(new Function2<Integer, Integer, Integer>() {
@Override


@ -136,7 +136,7 @@ public final class JavaKinesisWordCountASL { // needs to be public for access fr
JavaStreamingContext jssc = new JavaStreamingContext(sparkConfig, batchInterval);
// Create the Kinesis DStreams
List<JavaDStream<byte[]>> streamsList = new ArrayList<JavaDStream<byte[]>>(numStreams);
List<JavaDStream<byte[]>> streamsList = new ArrayList<>(numStreams);
for (int i = 0; i < numStreams; i++) {
streamsList.add(
KinesisUtils.createStream(jssc, kinesisAppName, streamName, endpointUrl, regionName,


@ -58,12 +58,12 @@ abstract class AbstractCommandBuilder {
private Map<String, String> effectiveConfig;
public AbstractCommandBuilder() {
this.appArgs = new ArrayList<String>();
this.childEnv = new HashMap<String, String>();
this.conf = new HashMap<String, String>();
this.files = new ArrayList<String>();
this.jars = new ArrayList<String>();
this.pyFiles = new ArrayList<String>();
this.appArgs = new ArrayList<>();
this.childEnv = new HashMap<>();
this.conf = new HashMap<>();
this.files = new ArrayList<>();
this.jars = new ArrayList<>();
this.pyFiles = new ArrayList<>();
}
/**
@ -87,7 +87,7 @@ abstract class AbstractCommandBuilder {
* class.
*/
List<String> buildJavaCommand(String extraClassPath) throws IOException {
List<String> cmd = new ArrayList<String>();
List<String> cmd = new ArrayList<>();
String envJavaHome;
if (javaHome != null) {
@ -134,7 +134,7 @@ abstract class AbstractCommandBuilder {
List<String> buildClassPath(String appClassPath) throws IOException {
String sparkHome = getSparkHome();
List<String> cp = new ArrayList<String>();
List<String> cp = new ArrayList<>();
addToClassPath(cp, getenv("SPARK_CLASSPATH"));
addToClassPath(cp, appClassPath);


@ -147,7 +147,7 @@ class CommandBuilderUtils {
* Output: [ "ab cd", "efgh", "i \" j" ]
*/
static List<String> parseOptionString(String s) {
List<String> opts = new ArrayList<String>();
List<String> opts = new ArrayList<>();
StringBuilder opt = new StringBuilder();
boolean inOpt = false;
boolean inSingleQuote = false;


@ -129,7 +129,7 @@ class LauncherServer implements Closeable {
server.setReuseAddress(true);
server.bind(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0));
this.clients = new ArrayList<ServerConnection>();
this.clients = new ArrayList<>();
this.threadIds = new AtomicLong();
this.factory = new NamedThreadFactory(THREAD_NAME_FMT);
this.pending = new ConcurrentHashMap<>();


@ -50,7 +50,7 @@ class Main {
public static void main(String[] argsArray) throws Exception {
checkArgument(argsArray.length > 0, "Not enough arguments: missing class name.");
List<String> args = new ArrayList<String>(Arrays.asList(argsArray));
List<String> args = new ArrayList<>(Arrays.asList(argsArray));
String className = args.remove(0);
boolean printLaunchCommand = !isEmpty(System.getenv("SPARK_PRINT_LAUNCH_COMMAND"));
@ -70,7 +70,7 @@ class Main {
// Ignore parsing exceptions.
}
List<String> help = new ArrayList<String>();
List<String> help = new ArrayList<>();
if (parser.className != null) {
help.add(parser.CLASS);
help.add(parser.className);
@ -82,7 +82,7 @@ class Main {
builder = new SparkClassCommandBuilder(className, args);
}
Map<String, String> env = new HashMap<String, String>();
Map<String, String> env = new HashMap<>();
List<String> cmd = builder.buildCommand(env);
if (printLaunchCommand) {
System.err.println("Spark Command: " + join(" ", cmd));
@ -130,7 +130,7 @@ class Main {
return cmd;
}
List<String> newCmd = new ArrayList<String>();
List<String> newCmd = new ArrayList<>();
newCmd.add("env");
for (Map.Entry<String, String> e : childEnv.entrySet()) {


@ -44,7 +44,7 @@ class SparkClassCommandBuilder extends AbstractCommandBuilder {
@Override
public List<String> buildCommand(Map<String, String> env) throws IOException {
List<String> javaOptsKeys = new ArrayList<String>();
List<String> javaOptsKeys = new ArrayList<>();
String memKey = null;
String extraClassPath = null;


@ -75,7 +75,7 @@ public class SparkLauncher {
/** Used internally to create unique logger names. */
private static final AtomicInteger COUNTER = new AtomicInteger();
static final Map<String, String> launcherConfig = new HashMap<String, String>();
static final Map<String, String> launcherConfig = new HashMap<>();
/**
* Set a configuration value for the launcher library. These config values do not affect the
@ -428,7 +428,7 @@ public class SparkLauncher {
}
private ProcessBuilder createBuilder() {
List<String> cmd = new ArrayList<String>();
List<String> cmd = new ArrayList<>();
String script = isWindows() ? "spark-submit.cmd" : "spark-submit";
cmd.add(join(File.separator, builder.getSparkHome(), "bin", script));
cmd.addAll(builder.buildSparkSubmitArgs());
@ -437,7 +437,7 @@ public class SparkLauncher {
// preserved, otherwise the batch interpreter will mess up the arguments. Batch scripts are
// weird.
if (isWindows()) {
List<String> winCmd = new ArrayList<String>();
List<String> winCmd = new ArrayList<>();
for (String arg : cmd) {
winCmd.add(quoteForBatchScript(arg));
}


@ -67,7 +67,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
* command line parsing works. This maps the class name to the resource to use when calling
* spark-submit.
*/
private static final Map<String, String> specialClasses = new HashMap<String, String>();
private static final Map<String, String> specialClasses = new HashMap<>();
static {
specialClasses.put("org.apache.spark.repl.Main", "spark-shell");
specialClasses.put("org.apache.spark.sql.hive.thriftserver.SparkSQLCLIDriver",
@ -87,12 +87,12 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
private boolean allowsMixedArguments;
SparkSubmitCommandBuilder() {
this.sparkArgs = new ArrayList<String>();
this.sparkArgs = new ArrayList<>();
this.printInfo = false;
}
SparkSubmitCommandBuilder(List<String> args) {
this.sparkArgs = new ArrayList<String>();
this.sparkArgs = new ArrayList<>();
List<String> submitArgs = args;
if (args.size() > 0 && args.get(0).equals(PYSPARK_SHELL)) {
this.allowsMixedArguments = true;
@ -123,7 +123,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
}
List<String> buildSparkSubmitArgs() {
List<String> args = new ArrayList<String>();
List<String> args = new ArrayList<>();
SparkSubmitOptionParser parser = new SparkSubmitOptionParser();
if (verbose) {
@ -244,7 +244,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
// The executable is the PYSPARK_DRIVER_PYTHON env variable set by the pyspark script,
// followed by PYSPARK_DRIVER_PYTHON_OPTS.
List<String> pyargs = new ArrayList<String>();
List<String> pyargs = new ArrayList<>();
pyargs.add(firstNonEmpty(System.getenv("PYSPARK_DRIVER_PYTHON"), "python"));
String pyOpts = System.getenv("PYSPARK_DRIVER_PYTHON_OPTS");
if (!isEmpty(pyOpts)) {
@ -270,7 +270,7 @@ class SparkSubmitCommandBuilder extends AbstractCommandBuilder {
env.put("R_PROFILE_USER",
join(File.separator, sparkHome, "R", "lib", "SparkR", "profile", "shell.R"));
List<String> args = new ArrayList<String>();
List<String> args = new ArrayList<>();
args.add(firstNonEmpty(System.getenv("SPARKR_DRIVER_R"), "R"));
return args;
}


@ -73,7 +73,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
"spark.randomOption=foo",
parser.CONF,
SparkLauncher.DRIVER_EXTRA_LIBRARY_PATH + "=/driverLibPath");
Map<String, String> env = new HashMap<String, String>();
Map<String, String> env = new HashMap<>();
List<String> cmd = buildCommand(sparkSubmitArgs, env);
assertTrue(findInStringList(env.get(CommandBuilderUtils.getLibPathEnvName()),
@ -125,7 +125,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
"--master=foo",
"--deploy-mode=bar");
Map<String, String> env = new HashMap<String, String>();
Map<String, String> env = new HashMap<>();
List<String> cmd = buildCommand(sparkSubmitArgs, env);
assertEquals("python", cmd.get(cmd.size() - 1));
assertEquals(
@ -142,7 +142,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
"script.py",
"arg1");
Map<String, String> env = new HashMap<String, String>();
Map<String, String> env = new HashMap<>();
List<String> cmd = buildCommand(sparkSubmitArgs, env);
assertEquals("foo", findArgValue(cmd, "--master"));
@ -178,7 +178,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
+ "/launcher/src/test/resources");
}
Map<String, String> env = new HashMap<String, String>();
Map<String, String> env = new HashMap<>();
List<String> cmd = launcher.buildCommand(env);
// Checks below are different for driver and non-driver mode.
@ -258,7 +258,7 @@ public class SparkSubmitCommandBuilderSuite extends BaseSuite {
}
private Map<String, String> parseConf(List<String> cmd, SparkSubmitOptionParser parser) {
Map<String, String> conf = new HashMap<String, String>();
Map<String, String> conf = new HashMap<>();
for (int i = 0; i < cmd.size(); i++) {
if (cmd.get(i).equals(parser.CONF)) {
String[] val = cmd.get(i + 1).split("=", 2);


@ -56,7 +56,7 @@ public class JavaDecisionTreeClassifierSuite implements Serializable {
JavaRDD<LabeledPoint> data = sc.parallelize(
LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeatures = new HashMap<>();
DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
// This tests setters. Training with various options is tested in Scala.


@ -56,7 +56,7 @@ public class JavaGBTClassifierSuite implements Serializable {
JavaRDD<LabeledPoint> data = sc.parallelize(
LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeatures = new HashMap<>();
DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
// This tests setters. Training with various options is tested in Scala.


@ -57,7 +57,7 @@ public class JavaRandomForestClassifierSuite implements Serializable {
JavaRDD<LabeledPoint> data = sc.parallelize(
LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeatures = new HashMap<>();
DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 2);
// This tests setters. Training with various options is tested in Scala.


@ -56,7 +56,7 @@ public class JavaDecisionTreeRegressorSuite implements Serializable {
JavaRDD<LabeledPoint> data = sc.parallelize(
LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeatures = new HashMap<>();
DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
// This tests setters. Training with various options is tested in Scala.


@ -56,7 +56,7 @@ public class JavaGBTRegressorSuite implements Serializable {
JavaRDD<LabeledPoint> data = sc.parallelize(
LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeatures = new HashMap<>();
DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
GBTRegressor rf = new GBTRegressor()


@ -57,7 +57,7 @@ public class JavaRandomForestRegressorSuite implements Serializable {
JavaRDD<LabeledPoint> data = sc.parallelize(
LogisticRegressionSuite.generateLogisticInputAsList(A, B, nPoints, 42), 2).cache();
Map<Integer, Integer> categoricalFeatures = new HashMap<Integer, Integer>();
Map<Integer, Integer> categoricalFeatures = new HashMap<>();
DataFrame dataFrame = TreeTests.setMetadata(data, categoricalFeatures, 0);
// This tests setters. Training with various options is tested in Scala.


@ -45,9 +45,9 @@ public class JavaLDASuite implements Serializable {
@Before
public void setUp() {
sc = new JavaSparkContext("local", "JavaLDA");
ArrayList<Tuple2<Long, Vector>> tinyCorpus = new ArrayList<Tuple2<Long, Vector>>();
ArrayList<Tuple2<Long, Vector>> tinyCorpus = new ArrayList<>();
for (int i = 0; i < LDASuite.tinyCorpus().length; i++) {
tinyCorpus.add(new Tuple2<Long, Vector>((Long)LDASuite.tinyCorpus()[i]._1(),
tinyCorpus.add(new Tuple2<>((Long)LDASuite.tinyCorpus()[i]._1(),
LDASuite.tinyCorpus()[i]._2()));
}
JavaRDD<Tuple2<Long, Vector>> tmpCorpus = sc.parallelize(tinyCorpus, 2);
@ -189,8 +189,8 @@ public class JavaLDASuite implements Serializable {
double logPerplexity = toyModel.logPerplexity(pairedDocs);
// check: logLikelihood.
ArrayList<Tuple2<Long, Vector>> docsSingleWord = new ArrayList<Tuple2<Long, Vector>>();
docsSingleWord.add(new Tuple2<Long, Vector>(0L, Vectors.dense(1.0, 0.0, 0.0)));
ArrayList<Tuple2<Long, Vector>> docsSingleWord = new ArrayList<>();
docsSingleWord.add(new Tuple2<>(0L, Vectors.dense(1.0, 0.0, 0.0)));
JavaPairRDD<Long, Vector> single = JavaPairRDD.fromJavaRDD(sc.parallelize(docsSingleWord));
double logLikelihood = toyModel.logLikelihood(single);
}


@ -64,7 +64,7 @@ public class JavaDecisionTreeSuite implements Serializable {
public void runDTUsingConstructor() {
List<LabeledPoint> arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList();
JavaRDD<LabeledPoint> rdd = sc.parallelize(arr);
HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories
int maxDepth = 4;
@ -84,7 +84,7 @@ public class JavaDecisionTreeSuite implements Serializable {
public void runDTUsingStaticMethods() {
List<LabeledPoint> arr = DecisionTreeSuite.generateCategoricalDataPointsAsJavaList();
JavaRDD<LabeledPoint> rdd = sc.parallelize(arr);
HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<Integer, Integer>();
HashMap<Integer, Integer> categoricalFeaturesInfo = new HashMap<>();
categoricalFeaturesInfo.put(1, 2); // feature 1 has 2 categories
int maxDepth = 4;


@ -201,7 +201,7 @@ public class DataTypes {
if (fields == null) {
throw new IllegalArgumentException("fields should not be null.");
}
Set<String> distinctNames = new HashSet<String>();
Set<String> distinctNames = new HashSet<>();
for (StructField field : fields) {
if (field == null) {
throw new IllegalArgumentException(


@ -149,7 +149,7 @@ public abstract class SpecificParquetRecordReaderBase<T> extends RecordReader<Vo
* by MapReduce.
*/
public static List<String> listDirectory(File path) throws IOException {
List<String> result = new ArrayList<String>();
List<String> result = new ArrayList<>();
if (path.isDirectory()) {
for (File f: path.listFiles()) {
result.addAll(listDirectory(f));


@ -111,7 +111,7 @@ public class JavaApplySchemaSuite implements Serializable {
df.registerTempTable("people");
Row[] actual = sqlContext.sql("SELECT * FROM people").collect();
List<Row> expected = new ArrayList<Row>(2);
List<Row> expected = new ArrayList<>(2);
expected.add(RowFactory.create("Michael", 29));
expected.add(RowFactory.create("Yin", 28));


@ -67,7 +67,7 @@ public class JavaDatasetSuite implements Serializable {
}
private <T1, T2> Tuple2<T1, T2> tuple2(T1 t1, T2 t2) {
return new Tuple2<T1, T2>(t1, t2);
return new Tuple2<>(t1, t2);
}
@Test


@ -42,14 +42,14 @@ public class MyDoubleAvg extends UserDefinedAggregateFunction {
private DataType _returnDataType;
public MyDoubleAvg() {
List<StructField> inputFields = new ArrayList<StructField>();
List<StructField> inputFields = new ArrayList<>();
inputFields.add(DataTypes.createStructField("inputDouble", DataTypes.DoubleType, true));
_inputDataType = DataTypes.createStructType(inputFields);
// The buffer has two values, bufferSum for storing the current sum and
// bufferCount for storing the number of non-null input values that have been contribuetd
// to the current sum.
List<StructField> bufferFields = new ArrayList<StructField>();
List<StructField> bufferFields = new ArrayList<>();
bufferFields.add(DataTypes.createStructField("bufferSum", DataTypes.DoubleType, true));
bufferFields.add(DataTypes.createStructField("bufferCount", DataTypes.LongType, true));
_bufferSchema = DataTypes.createStructType(bufferFields);


@ -41,11 +41,11 @@ public class MyDoubleSum extends UserDefinedAggregateFunction {
private DataType _returnDataType;
public MyDoubleSum() {
List<StructField> inputFields = new ArrayList<StructField>();
List<StructField> inputFields = new ArrayList<>();
inputFields.add(DataTypes.createStructField("inputDouble", DataTypes.DoubleType, true));
_inputDataType = DataTypes.createStructType(inputFields);
List<StructField> bufferFields = new ArrayList<StructField>();
List<StructField> bufferFields = new ArrayList<>();
bufferFields.add(DataTypes.createStructField("bufferDouble", DataTypes.DoubleType, true));
_bufferSchema = DataTypes.createStructType(bufferFields);


@ -50,7 +50,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
private static final org.apache.thrift.protocol.TField LINT_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("lintString", org.apache.thrift.protocol.TType.LIST, (short)5);
private static final org.apache.thrift.protocol.TField M_STRING_STRING_FIELD_DESC = new org.apache.thrift.protocol.TField("mStringString", org.apache.thrift.protocol.TType.MAP, (short)6);
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<>();
static {
schemes.put(StandardScheme.class, new ComplexStandardSchemeFactory());
schemes.put(TupleScheme.class, new ComplexTupleSchemeFactory());
@ -72,7 +72,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
LINT_STRING((short)5, "lintString"),
M_STRING_STRING((short)6, "mStringString");
private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
private static final Map<String, _Fields> byName = new HashMap<>();
static {
for (_Fields field : EnumSet.allOf(_Fields.class)) {
@ -141,7 +141,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
private byte __isset_bitfield = 0;
public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
static {
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<>(_Fields.class);
tmpMap.put(_Fields.AINT, new org.apache.thrift.meta_data.FieldMetaData("aint", org.apache.thrift.TFieldRequirementType.DEFAULT,
new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32)));
tmpMap.put(_Fields.A_STRING, new org.apache.thrift.meta_data.FieldMetaData("aString", org.apache.thrift.TFieldRequirementType.DEFAULT,
@ -194,28 +194,28 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
this.aString = other.aString;
}
if (other.isSetLint()) {
List<Integer> __this__lint = new ArrayList<Integer>();
List<Integer> __this__lint = new ArrayList<>();
for (Integer other_element : other.lint) {
__this__lint.add(other_element);
}
this.lint = __this__lint;
}
if (other.isSetLString()) {
List<String> __this__lString = new ArrayList<String>();
List<String> __this__lString = new ArrayList<>();
for (String other_element : other.lString) {
__this__lString.add(other_element);
}
this.lString = __this__lString;
}
if (other.isSetLintString()) {
List<IntString> __this__lintString = new ArrayList<IntString>();
List<IntString> __this__lintString = new ArrayList<>();
for (IntString other_element : other.lintString) {
__this__lintString.add(new IntString(other_element));
}
this.lintString = __this__lintString;
}
if (other.isSetMStringString()) {
Map<String,String> __this__mStringString = new HashMap<String,String>();
Map<String,String> __this__mStringString = new HashMap<>();
for (Map.Entry<String, String> other_element : other.mStringString.entrySet()) {
String other_element_key = other_element.getKey();
@ -339,7 +339,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
public void addToLString(String elem) {
if (this.lString == null) {
this.lString = new ArrayList<String>();
this.lString = new ArrayList<>();
}
this.lString.add(elem);
}
@ -411,7 +411,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
public void putToMStringString(String key, String val) {
if (this.mStringString == null) {
this.mStringString = new HashMap<String,String>();
this.mStringString = new HashMap<>();
}
this.mStringString.put(key, val);
}
@ -876,7 +876,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list0 = iprot.readListBegin();
struct.lint = new ArrayList<Integer>(_list0.size);
struct.lint = new ArrayList<>(_list0.size);
for (int _i1 = 0; _i1 < _list0.size; ++_i1)
{
int _elem2; // required
@ -894,7 +894,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list3 = iprot.readListBegin();
struct.lString = new ArrayList<String>(_list3.size);
struct.lString = new ArrayList<>(_list3.size);
for (int _i4 = 0; _i4 < _list3.size; ++_i4)
{
String _elem5; // required
@ -912,7 +912,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
{
org.apache.thrift.protocol.TList _list6 = iprot.readListBegin();
struct.lintString = new ArrayList<IntString>(_list6.size);
struct.lintString = new ArrayList<>(_list6.size);
for (int _i7 = 0; _i7 < _list6.size; ++_i7)
{
IntString _elem8; // required
@ -1114,7 +1114,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
if (incoming.get(2)) {
{
org.apache.thrift.protocol.TList _list21 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I32, iprot.readI32());
struct.lint = new ArrayList<Integer>(_list21.size);
struct.lint = new ArrayList<>(_list21.size);
for (int _i22 = 0; _i22 < _list21.size; ++_i22)
{
int _elem23; // required
@ -1127,7 +1127,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
if (incoming.get(3)) {
{
org.apache.thrift.protocol.TList _list24 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
struct.lString = new ArrayList<String>(_list24.size);
struct.lString = new ArrayList<>(_list24.size);
for (int _i25 = 0; _i25 < _list24.size; ++_i25)
{
String _elem26; // required
@ -1140,7 +1140,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
if (incoming.get(4)) {
{
org.apache.thrift.protocol.TList _list27 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
struct.lintString = new ArrayList<IntString>(_list27.size);
struct.lintString = new ArrayList<>(_list27.size);
for (int _i28 = 0; _i28 < _list27.size; ++_i28)
{
IntString _elem29; // required
@ -1154,7 +1154,7 @@ public class Complex implements org.apache.thrift.TBase<Complex, Complex._Fields
if (incoming.get(5)) {
{
org.apache.thrift.protocol.TMap _map30 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
struct.mStringString = new HashMap<String,String>(2*_map30.size);
struct.mStringString = new HashMap<>(2*_map30.size);
for (int _i31 = 0; _i31 < _map30.size; ++_i31)
{
String _key32; // required