
Commit 0e7940b

Resolve conflicts

Author: Robert Kruszewski (committed)
1 parent 6197903 · commit 0e7940b

File tree: 5 files changed, +2 -25 lines


dev/test-dependencies.sh
Lines changed: 0 additions & 4 deletions

@@ -29,11 +29,7 @@ export LC_ALL=C
 # TODO: This would be much nicer to do in SBT, once SBT supports Maven-style resolution.
 
 # NOTE: These should match those in the release publishing script
-<<<<<<< HEAD
 HADOOP2_MODULE_PROFILES="-Phadoop-cloud -Pkubernetes -Pyarn -Phive"
-=======
-HADOOP2_MODULE_PROFILES="-Phive-thriftserver -Pmesos -Pkafka-0-8 -Pyarn -Phive"
->>>>>>> master
 MVN="build/mvn"
 HADOOP_PROFILES=(
   hadoop-palantir

pom.xml
Lines changed: 0 additions & 4 deletions

@@ -696,11 +696,7 @@
       <dependency>
         <groupId>io.netty</groupId>
         <artifactId>netty-all</artifactId>
-<<<<<<< HEAD
         <version>4.0.50.Final</version>
-=======
-        <version>4.0.47.Final</version>
->>>>>>> master
       </dependency>
       <dependency>
         <groupId>io.netty</groupId>
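
The resolution keeps the HEAD side's netty-all 4.0.50.Final over master's 4.0.47.Final. As a sanity check after a version conflict like this, the version that actually lands on the classpath can be read from the jar manifest at runtime. A minimal sketch, not part of the commit, assuming the netty-all jar ships an Implementation-Version manifest entry (getImplementationVersion returns null when it does not):

object NettyVersionCheck {
  def main(args: Array[String]): Unit = {
    // Read the manifest version of whichever netty-all jar resolved onto the
    // classpath; Channel is a core netty-all 4.x interface.
    val version = Option(classOf[io.netty.channel.Channel].getPackage.getImplementationVersion)
    println(s"netty-all on classpath: ${version.getOrElse("unknown (no manifest version)")}")
  }
}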

project/SparkBuild.scala
Lines changed: 1 addition & 8 deletions

@@ -57,20 +57,13 @@ object BuildCommons {
     "tags", "sketch", "kvstore"
   ).map(ProjectRef(buildLocation, _)) ++ sqlProjects ++ streamingProjects
 
-<<<<<<< HEAD
   val optionallyEnabledProjects@Seq(mesos, yarn, sparkGangliaLgpl,
     streamingKinesisAsl, dockerIntegrationTests, hadoopCloud, kubernetes, _*) =
-    Seq("mesos", "yarn", "ganglia-lgpl", "streaming-kinesis-asl", "docker-integration-tests",
+    Seq("mesos", "yarn", "streaming-kafka-0-8", "ganglia-lgpl", "streaming-kinesis-asl", "docker-integration-tests",
       "hadoop-cloud", "kubernetes", "kubernetes-integration-tests",
       "kubernetes-integration-tests-spark-jobs", "kubernetes-integration-tests-spark-jobs-helpers",
       "kubernetes-docker-minimal-bundle"
     ).map(ProjectRef(buildLocation, _))
-=======
-  val optionallyEnabledProjects@Seq(mesos, yarn, streamingKafka, sparkGangliaLgpl,
-    streamingKinesisAsl, dockerIntegrationTests, hadoopCloud) =
-    Seq("mesos", "yarn", "streaming-kafka-0-8", "ganglia-lgpl", "streaming-kinesis-asl",
-      "docker-integration-tests", "hadoop-cloud").map(ProjectRef(buildLocation, _))
->>>>>>> master
 
   val assemblyProjects@Seq(networkYarn, streamingFlumeAssembly, streamingKafkaAssembly, streamingKafka010Assembly, streamingKinesisAslAssembly) =
     Seq("network-yarn", "streaming-flume-assembly", "streaming-kafka-0-8-assembly", "streaming-kafka-0-10-assembly", "streaming-kinesis-asl-assembly")

sql/catalyst/src/main/scala/org/apache/spark/sql/internal/SQLConf.scala
Lines changed: 1 addition & 3 deletions

@@ -322,20 +322,18 @@ object SQLConf {
     .booleanConf
     .createWithDefault(true)
 
-<<<<<<< HEAD
   val PARQUET_PARTITION_PRUNING_ENABLED = buildConf("spark.sql.parquet.enablePartitionPruning")
     .doc("Enables driver-side partition pruning for Parquet.")
     .booleanConf
     .createWithDefault(true)
-=======
+
   val ORC_COMPRESSION = buildConf("spark.sql.orc.compression.codec")
     .doc("Sets the compression codec use when writing ORC files. Acceptable values include: " +
       "none, uncompressed, snappy, zlib, lzo.")
     .stringConf
     .transform(_.toLowerCase(Locale.ROOT))
     .checkValues(Set("none", "uncompressed", "snappy", "zlib", "lzo"))
     .createWithDefault("snappy")
->>>>>>> master
 
   val ORC_FILTER_PUSHDOWN_ENABLED = buildConf("spark.sql.orc.filterPushdown")
     .doc("When true, enable filter pushdown for ORC files.")

sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala
Lines changed: 0 additions & 6 deletions

@@ -512,15 +512,9 @@ class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContex
       val accu = new NumRowGroupsAcc
       sparkContext.register(accu)
 
-<<<<<<< HEAD
       val df = spark.read.parquet(path).filter("a < 100")
       df.foreachPartition(_.foreach(v => accu.add(0)))
       df.collect
-=======
-      val df = spark.read.parquet(path).filter("a < 100")
-      df.foreachPartition((it: Iterator[Row]) => it.foreach(v => accu.add(0)))
-      df.collect
->>>>>>> master
 
       assert(func(accu.value))
       AccumulatorContext.remove(accu.id)
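
The two sides here differ only in how the foreachPartition lambda is written. A plausible reason, hedged since the commit does not say: Dataset.foreachPartition is overloaded for Scala (Iterator[T] => Unit) and Java (ForeachPartitionFunction[T]) callers, and once SAM conversion applies (Scala 2.12+) an untyped lambda can match both, so master's side annotates the parameter type. A self-contained sketch of that ambiguity, with illustrative names rather than Spark's API:

object OverloadDemo {
  trait JavaStyle[T] { def call(it: java.util.Iterator[T]): Unit }
  def foreachPartition[T](f: Iterator[T] => Unit): Unit = println("scala overload")
  def foreachPartition[T](f: JavaStyle[T]): Unit = println("java overload")

  def main(args: Array[String]): Unit = {
    // foreachPartition[Int](_.foreach(println))  // ambiguous once SAM conversion applies
    foreachPartition((it: Iterator[Int]) => it.foreach(println)) // explicit type picks the Scala overload
  }
}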
