
Commit e329bea

dongjoon-hyun authored and srowen committed
[MINOR][BUILD] Fix Java linter errors
This PR cleans up a few Java linter errors for the Apache Spark 2.2 release.

```bash
$ dev/lint-java
Using `mvn` from path: /usr/local/bin/mvn
Checkstyle checks passed.
```

We can check the result at Travis CI, [here](https://travis-ci.org/dongjoon-hyun/spark/builds/244297894).

Author: Dongjoon Hyun <[email protected]>

Closes apache#18345 from dongjoon-hyun/fix_lint_java_2.

(cherry picked from commit ecc5631)
Signed-off-by: Sean Owen <[email protected]>
1 parent 7b50736 · commit e329bea

5 files changed: +10 -6 lines

common/network-shuffle/src/main/java/org/apache/spark/network/shuffle/OneForOneBlockFetcher.java

Lines changed: 1 addition & 1 deletion
```diff
@@ -157,7 +157,7 @@ private class DownloadCallback implements StreamCallback {
     private File targetFile = null;
     private int chunkIndex;

-    public DownloadCallback(File targetFile, int chunkIndex) throws IOException {
+    DownloadCallback(File targetFile, int chunkIndex) throws IOException {
       this.targetFile = targetFile;
       this.channel = Channels.newChannel(new FileOutputStream(targetFile));
       this.chunkIndex = chunkIndex;
```
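For context, `DownloadCallback` is a private inner class, so a `public` constructor grants no accessibility beyond what the enclosing class already limits; the modifier is redundant, which is presumably what the Checkstyle run flagged (the commit message does not name the specific rule). A minimal sketch of the pattern:

```java
public class Outer {
  private static class Callback {
    // Redundant: nothing outside Outer can see Callback, so 'public'
    // adds no effective accessibility and the linter flags it.
    public Callback() {}

    // Preferred: no modifier at all, as in the fix above.
    Callback(int id) {}
  }
}
```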

core/src/main/java/org/apache/spark/shuffle/sort/UnsafeShuffleWriter.java

Lines changed: 3 additions & 2 deletions
```diff
@@ -353,8 +353,9 @@ private long[] mergeSpillsWithFileStream(
       }
       for (int partition = 0; partition < numPartitions; partition++) {
         final long initialFileLength = mergedFileOutputStream.getByteCount();
-        // Shield the underlying output stream from close() calls, so that we can close the higher
-        // level streams to make sure all data is really flushed and internal state is cleaned.
+        // Shield the underlying output stream from close() calls, so that we can close
+        // the higher level streams to make sure all data is really flushed and internal state is
+        // cleaned.
         OutputStream partitionOutput = new CloseShieldOutputStream(
           new TimeTrackingOutputStream(writeMetrics, mergedFileOutputStream));
         partitionOutput = blockManager.serializerManager().wrapForEncryption(partitionOutput);
```
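The edit itself only rewraps a comment that ran past the line-length limit (an assumption: Spark's Java style caps lines at 100 characters, though the commit message does not say which check fired). The technique the comment describes is worth a sketch: a `CloseShieldOutputStream` (presumably Commons IO's class of that name; the import is not visible in this hunk) turns `close()` into a no-op, so higher-level wrappers can be closed to flush their buffers without closing the shared underlying stream.

```java
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import org.apache.commons.io.output.CloseShieldOutputStream;

public class CloseShieldDemo {
  public static void main(String[] args) throws IOException {
    // One shared sink, reused across "partitions".
    ByteArrayOutputStream shared = new ByteArrayOutputStream();

    for (int partition = 0; partition < 3; partition++) {
      // The shield makes close() a no-op on 'shared'.
      OutputStream partitionOutput = new CloseShieldOutputStream(shared);
      partitionOutput.write(("partition " + partition + "\n").getBytes());
      partitionOutput.close();  // closes the wrapper; 'shared' stays open
    }

    shared.close();
    System.out.print(shared.toString());
  }
}
```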

examples/src/main/java/org/apache/spark/examples/ml/JavaALSExample.java

Lines changed: 1 addition & 1 deletion
```diff
@@ -121,7 +121,7 @@ public static void main(String[] args) {
     // $example off$
     userRecs.show();
     movieRecs.show();
-
+
     spark.stop();
   }
 }
```

(The changed line removes trailing whitespace from an otherwise blank line.)

examples/src/main/java/org/apache/spark/examples/sql/JavaSQLDataSourceExample.java

Lines changed: 5 additions & 1 deletion
```diff
@@ -124,7 +124,11 @@ private static void runBasicDataSourceExample(SparkSession spark) {
     peopleDF.write().bucketBy(42, "name").sortBy("age").saveAsTable("people_bucketed");
     // $example off:write_sorting_and_bucketing$
     // $example on:write_partitioning$
-    usersDF.write().partitionBy("favorite_color").format("parquet").save("namesPartByColor.parquet");
+    usersDF
+      .write()
+      .partitionBy("favorite_color")
+      .format("parquet")
+      .save("namesPartByColor.parquet");
     // $example off:write_partitioning$
     // $example on:write_partition_and_bucket$
     peopleDF
```
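The one-liner exceeded the line-length limit, so it was reflowed into the fluent one-call-per-line style already used for `peopleDF` just below. A self-contained sketch of the same pattern (the output path is hypothetical; the `DataFrameWriter` method names are Spark's public API):

```java
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;

public class PartitionedWriteSketch {
  public static void main(String[] args) {
    SparkSession spark = SparkSession.builder()
      .appName("PartitionedWriteSketch")
      .master("local[*]")
      .getOrCreate();

    // The users.parquet sample ships in the Spark source tree; any input
    // with a 'favorite_color' column works here.
    Dataset<Row> usersDF = spark.read().parquet("examples/src/main/resources/users.parquet");

    // One chained call per line keeps long writer pipelines readable
    // and under a 100-character line limit.
    usersDF
      .write()
      .partitionBy("favorite_color")
      .format("parquet")
      .save("/tmp/namesPartByColor.parquet");  // hypothetical output path

    spark.stop();
  }
}
```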

sql/catalyst/src/main/java/org/apache/spark/sql/streaming/OutputMode.java

Lines changed: 0 additions & 1 deletion
```diff
@@ -17,7 +17,6 @@

 package org.apache.spark.sql.streaming;

-import org.apache.spark.annotation.Experimental;
 import org.apache.spark.annotation.InterfaceStability;
 import org.apache.spark.sql.catalyst.streaming.InternalOutputModes;
```
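The removed import became dead once `@Experimental` was no longer referenced in this file (an inference from the diff; the commit message does not elaborate), which is the classic unused-import lint error. A minimal sketch of the pattern, assuming Spark's annotation classes are on the classpath:

```java
// Flagged by the linter: imported but never referenced in the file.
import org.apache.spark.annotation.Experimental;
// Fine: actually used on the class below.
import org.apache.spark.annotation.InterfaceStability;

@InterfaceStability.Evolving
public class OutputModeSketch {}
```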
