
Commit a958ea8

Resolve conflicts from merge with apache

1 parent ede46cf · commit a958ea8

10 files changed: 3 additions & 70 deletions

common/kvstore/src/main/java/org/apache/spark/util/kvstore/KVTypeInfo.java

Lines changed: 0 additions & 4 deletions
@@ -17,20 +17,16 @@
 
 package org.apache.spark.util.kvstore;
 
-import com.google.common.base.Preconditions;
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.stream.Stream;
 
-<<<<<<< HEAD:common/kvstore/src/main/java/org/apache/spark/kvstore/KVTypeInfo.java
-=======
 import com.google.common.base.Preconditions;
 
 import org.apache.spark.annotation.Private;
 
->>>>>>> origin/master:common/kvstore/src/main/java/org/apache/spark/util/kvstore/KVTypeInfo.java
 /**
  * Wrapper around types managed in a KVStore, providing easy access to their indexed fields.
  */

core/src/main/java/org/apache/spark/shuffle/sort/ShuffleExternalSorter.java

Lines changed: 0 additions & 8 deletions
@@ -121,23 +121,15 @@ final class ShuffleExternalSorter extends MemoryConsumer {
     this.numPartitions = numPartitions;
     // Use getSizeAsKb (not bytes) to maintain backwards compatibility if no units are provided
     this.fileBufferSizeBytes =
-<<<<<<< HEAD
-      (int) (long) conf.get(package$.MODULE$.SHUFFLE_FILE_BUFFER_SIZE()) * 1024;
-=======
       (int) (long) conf.get(package$.MODULE$.SHUFFLE_FILE_BUFFER_SIZE()) * 1024;
->>>>>>> origin/master
     this.numElementsForSpillThreshold =
       conf.getLong("spark.shuffle.spill.numElementsForceSpillThreshold", 1024 * 1024 * 1024);
     this.writeMetrics = writeMetrics;
     this.inMemSorter = new ShuffleInMemorySorter(
       this, initialSize, conf.getBoolean("spark.shuffle.sort.useRadixSort", true));
     this.peakMemoryUsedBytes = getMemoryUsage();
     this.diskWriteBufferSize =
-<<<<<<< HEAD
-      (int) (long) conf.get(package$.MODULE$.SHUFFLE_DISK_WRITE_BUFFER_SIZE());
-=======
       (int) (long) conf.get(package$.MODULE$.SHUFFLE_DISK_WRITE_BUFFER_SIZE());
->>>>>>> origin/master
   }
 
   /**

core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala

Lines changed: 0 additions & 3 deletions
@@ -304,11 +304,8 @@ object SparkSubmit extends CommandLineUtils {
     }
     val isYarnCluster = clusterManager == YARN && deployMode == CLUSTER
     val isMesosCluster = clusterManager == MESOS && deployMode == CLUSTER
-<<<<<<< HEAD
     val isKubernetesCluster = clusterManager == KUBERNETES && deployMode == CLUSTER
-=======
     val isStandAloneCluster = clusterManager == STANDALONE && deployMode == CLUSTER
->>>>>>> origin/master
 
     if (!isMesosCluster && !isStandAloneCluster) {
       // Resolve maven dependencies if there are any and add classpath to jars. Add them to py-files

pom.xml

Lines changed: 0 additions & 6 deletions
@@ -133,7 +133,6 @@
     <!-- Version used for internal directory structure -->
     <hive.version.short>1.2.1</hive.version.short>
     <derby.version>10.12.1.1</derby.version>
-<<<<<<< HEAD
     <parquet.version>1.9.1-palantir3</parquet.version>
     <feign.version>8.18.0</feign.version>
     <okhttp3.version>3.8.0</okhttp3.version>
@@ -142,13 +141,8 @@
     <retrofit.version>2.2.0</retrofit.version>
     <bouncycastle.version>1.54</bouncycastle.version>
     <jetty.version>9.4.6.v20170531</jetty.version>
-=======
-    <parquet.version>1.8.2</parquet.version>
     <orc.version>1.4.0</orc.version>
     <orc.classifier>nohive</orc.classifier>
-    <hive.parquet.version>1.6.0</hive.parquet.version>
-    <jetty.version>9.3.20.v20170531</jetty.version>
->>>>>>> origin/master
     <javaxservlet.version>3.1.0</javaxservlet.version>
     <chill.version>0.8.0</chill.version>
     <ivy.version>2.4.0</ivy.version>

project/plugins.sbt

Lines changed: 0 additions & 3 deletions
@@ -1,9 +1,6 @@
-<<<<<<< HEAD
 addSbtPlugin("com.typesafe.sbt" % "sbt-git" % "0.8.5")
 
-=======
 // need to make changes to uptake sbt 1.0 support in "com.eed3si9n" % "sbt-assembly" % "1.14.5"
->>>>>>> origin/master
 addSbtPlugin("com.eed3si9n" % "sbt-assembly" % "0.11.2")
 
 // sbt 1.0.0 support: https://github.com/typesafehub/sbteclipse/issues/343

sql/catalyst/src/main/java/org/apache/spark/sql/execution/UnsafeExternalRowSorter.java

Lines changed: 0 additions & 10 deletions
@@ -206,21 +206,11 @@ private static final class RowComparator extends RecordComparator {
 
     @Override
     public int compare(Object baseObj1, long baseOff1, Object baseObj2, long baseOff2) {
-<<<<<<< HEAD
-      // TODO: Why are the sizes -1?
-      row1.pointTo(baseObj1, baseOff1, -1);
-      row2.pointTo(baseObj2, baseOff2, -1);
-      int comparison = ordering.compare(row1, row2);
-      row1.pointTo(null, 0L, -1);
-      row2.pointTo(null, 0L, -1);
-      return comparison;
-=======
       // Note that since ordering doesn't need the total length of the record, we just pass 0
       // into the row.
       row1.pointTo(baseObj1, baseOff1, 0);
       row2.pointTo(baseObj2, baseOff2, 0);
       return ordering.compare(row1, row2);
->>>>>>> origin/master
     }
   }
 }
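The resolution here keeps the origin/master comparator body: the reusable UnsafeRow views are pointed at the raw records with a length of 0, since the ordering never reads the total record length, instead of HEAD's -1 length and the extra re-pointing to null. A minimal Scala sketch of that kept pattern follows; the Java class above is the real implementation, and numFields and ordering are assumed constructor parameters used only for this illustration.

import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.UnsafeRow

// Sketch of the comparator shape kept from origin/master: reusable UnsafeRow views
// are pointed at the two records, with 0 passed as the size because the ordering
// never needs the record's total length.
class RowComparatorSketch(numFields: Int, ordering: Ordering[InternalRow]) {
  private val row1 = new UnsafeRow(numFields)
  private val row2 = new UnsafeRow(numFields)

  def compare(baseObj1: AnyRef, baseOff1: Long, baseObj2: AnyRef, baseOff2: Long): Int = {
    row1.pointTo(baseObj1, baseOff1, 0)
    row2.pointTo(baseObj2, baseOff2, 0)
    ordering.compare(row1, row2)
  }
}

The UnsafeKVExternalSorter change further down is the same resolution, except both sides point past the first 4 bytes of each record before comparing.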

sql/core/pom.xml

Lines changed: 0 additions & 4 deletions
@@ -111,10 +111,6 @@
     <dependency>
       <groupId>com.fasterxml.jackson.core</groupId>
       <artifactId>jackson-databind</artifactId>
-<<<<<<< HEAD
-      <version>${fasterxml.jackson.databind.version}</version>
-=======
->>>>>>> origin/master
     </dependency>
     <dependency>
       <groupId>org.apache.arrow</groupId>

sql/core/src/main/java/org/apache/spark/sql/execution/UnsafeKVExternalSorter.java

Lines changed: 0 additions & 9 deletions
@@ -244,18 +244,9 @@ private static final class KVComparator extends RecordComparator {
     public int compare(Object baseObj1, long baseOff1, Object baseObj2, long baseOff2) {
       // Note that since ordering doesn't need the total length of the record, we just pass 0
       // into the row.
-<<<<<<< HEAD
-      row1.pointTo(baseObj1, baseOff1 + 4, -1);
-      row2.pointTo(baseObj2, baseOff2 + 4, -1);
-      int comparison = ordering.compare(row1, row2);
-      row1.pointTo(null, 0L, -1);
-      row2.pointTo(null, 0L, -1);
-      return comparison;
-=======
       row1.pointTo(baseObj1, baseOff1 + 4, 0);
       row2.pointTo(baseObj2, baseOff2 + 4, 0);
       return ordering.compare(row1, row2);
->>>>>>> origin/master
     }
   }

sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/SQLHadoopMapReduceCommitProtocol.scala

Lines changed: 3 additions & 20 deletions
@@ -33,22 +33,15 @@ class SQLHadoopMapReduceCommitProtocol(jobId: String, path: String)
   extends HadoopMapReduceCommitProtocol(jobId, path) with Serializable with Logging {
 
   override protected def setupCommitter(context: TaskAttemptContext): OutputCommitter = {
-    val clazz = context.getConfiguration
-      .getClass(SQLConf.OUTPUT_COMMITTER_CLASS.key, null, classOf[OutputCommitter])
+    var committer = context.getOutputFormatClass.newInstance().getOutputCommitter(context)
 
-<<<<<<< HEAD
-    if (clazz != null) {
-      logInfo(s"Using user defined output committer class ${clazz.getCanonicalName}")
-
-=======
     val configuration = context.getConfiguration
     val clazz =
       configuration.getClass(SQLConf.OUTPUT_COMMITTER_CLASS.key, null, classOf[OutputCommitter])
 
     if (clazz != null) {
       logInfo(s"Using user defined output committer class ${clazz.getCanonicalName}")
 
->>>>>>> origin/master
       // Every output format based on org.apache.hadoop.mapreduce.lib.output.OutputFormat
       // has an associated output committer. To override this output committer,
       // we will first try to use the output committer set in SQLConf.OUTPUT_COMMITTER_CLASS.
@@ -58,25 +51,15 @@ class SQLHadoopMapReduceCommitProtocol(jobId: String, path: String)
         // The specified output committer is a FileOutputCommitter.
         // So, we will use the FileOutputCommitter-specified constructor.
         val ctor = clazz.getDeclaredConstructor(classOf[Path], classOf[TaskAttemptContext])
-<<<<<<< HEAD
-        ctor.newInstance(new Path(path), context)
-=======
         committer = ctor.newInstance(new Path(path), context)
->>>>>>> origin/master
       } else {
         // The specified output committer is just an OutputCommitter.
         // So, we will use the no-argument constructor.
         val ctor = clazz.getDeclaredConstructor()
-<<<<<<< HEAD
-        ctor.newInstance()
-=======
         committer = ctor.newInstance()
->>>>>>> origin/master
       }
-    } else {
-      val committer = context.getOutputFormatClass.newInstance().getOutputCommitter(context)
-      logInfo(s"Using output committer class ${committer.getClass.getCanonicalName}")
-      committer
     }
+    logInfo(s"Using output committer class ${committer.getClass.getCanonicalName}")
+    committer
   }
 }
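Because the two hunks above only show the changed regions, the shape of the merged setupCommitter is easy to lose. The sketch below assembles it from the kept lines; the part of the method that falls between the hunks is not visible here, so the inner FileOutputCommitter type check and its comments are assumptions, not something this diff confirms.

// Sketch of the resolved setupCommitter, assembled from the kept diff lines above.
// The `classOf[FileOutputCommitter].isAssignableFrom(clazz)` guard is an assumption:
// that part of the method sits between the two hunks and is not shown in this diff.
override protected def setupCommitter(context: TaskAttemptContext): OutputCommitter = {
  // Default committer: whatever the job's configured output format provides.
  var committer = context.getOutputFormatClass.newInstance().getOutputCommitter(context)

  val configuration = context.getConfiguration
  val clazz =
    configuration.getClass(SQLConf.OUTPUT_COMMITTER_CLASS.key, null, classOf[OutputCommitter])

  if (clazz != null) {
    logInfo(s"Using user defined output committer class ${clazz.getCanonicalName}")

    if (classOf[FileOutputCommitter].isAssignableFrom(clazz)) { // assumed guard, see note above
      // A FileOutputCommitter subclass is built with the (Path, TaskAttemptContext) constructor.
      val ctor = clazz.getDeclaredConstructor(classOf[Path], classOf[TaskAttemptContext])
      committer = ctor.newInstance(new Path(path), context)
    } else {
      // A plain OutputCommitter is built with its no-argument constructor.
      val ctor = clazz.getDeclaredConstructor()
      committer = ctor.newInstance()
    }
  }
  logInfo(s"Using output committer class ${committer.getClass.getCanonicalName}")
  committer
}

The net effect of the resolution is the upstream structure: committer is a var that starts from the output format's default and is only replaced when SQLConf.OUTPUT_COMMITTER_CLASS is set, with a single logInfo and return at the end instead of HEAD's separate else branch.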

sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetPartitionDiscoverySuite.scala

Lines changed: 0 additions & 3 deletions
@@ -35,11 +35,8 @@ import org.apache.spark.sql.catalyst.expressions.Literal
 import org.apache.spark.sql.catalyst.util.DateTimeUtils
 import org.apache.spark.sql.execution.datasources._
 import org.apache.spark.sql.execution.datasources.{PartitionPath => Partition}
-<<<<<<< HEAD
-=======
 import org.apache.spark.sql.execution.streaming.MemoryStream
 import org.apache.spark.sql.functions._
->>>>>>> origin/master
 import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.sql.types._
