
Commit 2ca1ed9

Author: Robert Kruszewski
Commit message: resolve conflicts
Parent: aa2e951

File tree

4 files changed (+4, −38 lines)


core/src/main/scala/org/apache/spark/deploy/SparkSubmit.scala

Lines changed: 4 additions & 24 deletions
@@ -277,6 +277,10 @@ object SparkSubmit extends CommandLineUtils {
 
     // Fail fast, the following modes are not supported or applicable
     (clusterManager, deployMode) match {
+      case (KUBERNETES, CLIENT) =>
+        printErrorAndExit("Client mode is currently not supported for Kubernetes.")
+      case (KUBERNETES, CLUSTER) if args.isPython || args.isR =>
+        printErrorAndExit("Kubernetes does not currently support python or R applications.")
       case (STANDALONE, CLUSTER) if args.isPython =>
         printErrorAndExit("Cluster deploy mode is currently not supported for python " +
           "applications on standalone clusters.")
@@ -350,29 +354,6 @@ object SparkSubmit extends CommandLineUtils {
       })
       // scalastyle:on runtimeaddshutdownhook
 
-<<<<<<< HEAD
-    // The following modes are not supported or applicable
-    (clusterManager, deployMode) match {
-      case (KUBERNETES, CLIENT) =>
-        printErrorAndExit("Client mode is currently not supported for Kubernetes.")
-      case (KUBERNETES, CLUSTER) if args.isPython || args.isR =>
-        printErrorAndExit("Kubernetes does not currently support python or R applications.")
-      case (STANDALONE, CLUSTER) if args.isPython =>
-        printErrorAndExit("Cluster deploy mode is currently not supported for python " +
-          "applications on standalone clusters.")
-      case (STANDALONE, CLUSTER) if args.isR =>
-        printErrorAndExit("Cluster deploy mode is currently not supported for R " +
-          "applications on standalone clusters.")
-      case (LOCAL, CLUSTER) =>
-        printErrorAndExit("Cluster deploy mode is not compatible with master \"local\"")
-      case (_, CLUSTER) if isShell(args.primaryResource) =>
-        printErrorAndExit("Cluster deploy mode is not applicable to Spark shells.")
-      case (_, CLUSTER) if isSqlShell(args.mainClass) =>
-        printErrorAndExit("Cluster deploy mode is not applicable to Spark SQL shell.")
-      case (_, CLUSTER) if isThriftServer(args.mainClass) =>
-        printErrorAndExit("Cluster deploy mode is not applicable to Spark Thrift server.")
-      case _ =>
-=======
     // Resolve glob path for different resources.
     args.jars = Option(args.jars).map(resolveGlobPaths(_, hadoopConf)).orNull
     args.files = Option(args.files).map(resolveGlobPaths(_, hadoopConf)).orNull
@@ -390,7 +371,6 @@ object SparkSubmit extends CommandLineUtils {
     args.pyFiles = Option(args.pyFiles).map {
       downloadFileList(_, targetDir, args.sparkProperties, hadoopConf)
    }.orNull
->>>>>>> master
  }
 
 
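Net effect in SparkSubmit.scala: the Kubernetes fail-fast cases now live only in the early validation match, and the duplicate block left behind by the merge is deleted along with the conflict markers. A minimal sketch of how that fail-fast match behaves in isolation follows; the constants, flags, and printErrorAndExit below are simplified stand-ins for SparkSubmit's internals, not the real definitions.

// Sketch only, assuming the same (clusterManager, deployMode) match shape
// as SparkSubmit's validation; constants and exit handling are stand-ins.
object SubmitValidationSketch {
  val KUBERNETES = 16
  val CLIENT = 1
  val CLUSTER = 2

  def printErrorAndExit(msg: String): Unit = {
    Console.err.println(s"Error: $msg")
    sys.exit(1)
  }

  // Fail fast: reject unsupported combinations before any submission work.
  def validate(clusterManager: Int, deployMode: Int,
               isPython: Boolean, isR: Boolean): Unit =
    (clusterManager, deployMode) match {
      case (KUBERNETES, CLIENT) =>
        printErrorAndExit("Client mode is currently not supported for Kubernetes.")
      case (KUBERNETES, CLUSTER) if isPython || isR =>
        printErrorAndExit("Kubernetes does not currently support python or R applications.")
      case _ => // supported combination; continue with submission
    }
}

Because KUBERNETES, CLIENT, and CLUSTER start with an uppercase letter, Scala treats them as stable identifier patterns and matches by equality rather than binding new variables, which is what makes this idiom work.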
pom.xml

Lines changed: 0 additions & 6 deletions
@@ -133,7 +133,6 @@
     <!-- Version used for internal directory structure -->
     <hive.version.short>1.2.1</hive.version.short>
     <derby.version>10.12.1.1</derby.version>
-<<<<<<< HEAD
     <parquet.version>1.9.1-palantir3</parquet.version>
     <feign.version>8.18.0</feign.version>
     <okhttp3.version>3.8.0</okhttp3.version>
@@ -142,11 +141,6 @@
     <retrofit.version>2.2.0</retrofit.version>
     <bouncycastle.version>1.54</bouncycastle.version>
     <jetty.version>9.4.6.v20170531</jetty.version>
-=======
-    <parquet.version>1.8.2</parquet.version>
-    <hive.parquet.version>1.6.0</hive.parquet.version>
-    <jetty.version>9.3.20.v20170531</jetty.version>
->>>>>>> master
     <javaxservlet.version>3.1.0</javaxservlet.version>
     <chill.version>0.8.0</chill.version>
     <ivy.version>2.4.0</ivy.version>
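The pom.xml resolution keeps the HEAD (fork) side: Parquet stays at 1.9.1-palantir3 and Jetty at 9.4.6.v20170531, while master's parquet 1.8.2, hive.parquet, and older Jetty properties are dropped. To confirm which Parquet actually lands on the classpath after a resolution like this, a hypothetical sanity check (not part of the commit) could read the jar manifest; Implementation-Version may be null if the manifest does not set it.

// Hypothetical check: ask the JVM which Parquet jar is actually loaded.
// getImplementationVersion reads the jar manifest and can be null.
val parquetVersion = Option(
  classOf[org.apache.parquet.hadoop.ParquetFileReader].getPackage.getImplementationVersion)
println(s"Parquet on classpath: ${parquetVersion.getOrElse("unknown")}")
// Expected here: 1.9.1-palantir3 (the HEAD side), not master's 1.8.2.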

project/SparkBuild.scala

Lines changed: 0 additions & 4 deletions
@@ -92,11 +92,7 @@ object SparkBuild extends PomBuild {
   val projectsMap: mutable.Map[String, Seq[Setting[_]]] = mutable.Map.empty
 
   override val profiles = {
-<<<<<<< HEAD
-    val profiles = Properties.propOrNone("sbt.maven.profiles") orElse Properties.envOrNone("SBT_MAVEN_PROFILES") match {
-=======
     Properties.envOrNone("SBT_MAVEN_PROFILES") match {
->>>>>>> master
      case None => Seq("sbt")
      case Some(v) =>
        v.split("(\\s+|,)").filterNot(_.isEmpty).map(_.trim.replaceAll("-P", "")).toSeq
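The SparkBuild.scala resolution keeps master's behavior: profiles come only from the SBT_MAVEN_PROFILES environment variable, and HEAD's extra sbt.maven.profiles system-property lookup is gone. The parsing step itself is easy to check in isolation; a self-contained sketch with hypothetical inputs (the example profile strings and the NO_SUCH_VAR name are illustrative, not from the commit):

import scala.util.Properties

// Same parsing as SparkBuild.profiles above: split on whitespace or commas,
// drop empties, strip any leading "-P" from each profile name.
def parseProfiles(raw: Option[String]): Seq[String] = raw match {
  case None => Seq("sbt")
  case Some(v) =>
    v.split("(\\s+|,)").filterNot(_.isEmpty).map(_.trim.replaceAll("-P", "")).toSeq
}

// Both Maven-style and bare comma-separated forms yield the same profiles.
assert(parseProfiles(Some("-Pkubernetes -Phadoop-2.7")) == Seq("kubernetes", "hadoop-2.7"))
assert(parseProfiles(Some("kubernetes,hadoop-2.7")) == Seq("kubernetes", "hadoop-2.7"))
// Unset variable falls back to the default "sbt" profile.
assert(parseProfiles(Properties.envOrNone("NO_SUCH_VAR")) == Seq("sbt"))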

sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala

Lines changed: 0 additions & 4 deletions
@@ -546,13 +546,9 @@ class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContex
     import testImplicits._
 
     Seq(true, false).foreach { vectorized =>
-<<<<<<< HEAD
-      withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized.toString) {
-=======
       withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized.toString,
          SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> true.toString,
          SQLConf.SUPPORT_QUOTED_REGEX_COLUMN_NAME.key -> "false") {
->>>>>>> master
        withTempPath { path =>
          Seq(Some(1), None).toDF("col.dots").write.parquet(path.getAbsolutePath)
          assert(spark.read.parquet(path.getAbsolutePath).where("`col.dots` > 0").count() == 1)
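The test resolution keeps master's wider withSQLConf block: besides toggling the vectorized reader, it force-enables filter pushdown and disables quoted-regex column names, so the backtick-quoted `col.dots` in the filter is parsed as a single column name rather than a regex. A stripped-down, standalone version of the same round-trip, runnable against a plain local SparkSession outside the test harness (the local setup and temp path are illustrative assumptions):

import org.apache.spark.sql.SparkSession

// Stripped-down version of the test body above: write a column whose name
// contains a dot, then filter on it with backtick quoting.
val spark = SparkSession.builder().master("local[2]").appName("dots").getOrCreate()
import spark.implicits._

val path = java.nio.file.Files.createTempDirectory("parquet-dots").resolve("t").toString
Seq(Some(1), None).toDF("col.dots").write.parquet(path)

// Backticks keep "col.dots" as one column name instead of a struct field access;
// the None row is null and does not satisfy the > 0 predicate.
assert(spark.read.parquet(path).where("`col.dots` > 0").count() == 1)
spark.stop()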
