
Commit 228560c

Resolve conflicts
1 parent 66338b8 commit 228560c

8 files changed (+3, -63 lines)

core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedSchedulerBackend.scala

Lines changed: 0 additions & 6 deletions
@@ -232,17 +232,11 @@ class CoarseGrainedSchedulerBackend(scheduler: TaskSchedulerImpl, val rpcEnv: Rp
       removeWorker(workerId, host, message)
       context.reply(true)
 
-<<<<<<< HEAD
     case RetrieveSparkAppConfig(_) =>
-      val reply = SparkAppConfig(sparkProperties,
-        SparkEnv.get.securityManager.getIOEncryptionKey())
-=======
-    case RetrieveSparkAppConfig =>
       val reply = SparkAppConfig(
         sparkProperties,
         SparkEnv.get.securityManager.getIOEncryptionKey(),
         hadoopDelegationCreds)
->>>>>>> origin/master
       context.reply(reply)
   }
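The two branches modeled the RetrieveSparkAppConfig message differently: the HEAD side as a parameterized case class (matched here with a wildcard argument), origin/master as a bare message. The resolution keeps HEAD's parameterized form while adopting master's three-field SparkAppConfig reply. A minimal sketch of the pattern-match difference; the names below are illustrative stand-ins, not Spark's actual definitions:

object MessageShapes {
  sealed trait SchedulerMessage
  case class RetrieveConfigWithArg(resourceProfileId: Int) extends SchedulerMessage // fork-style shape
  case object RetrieveConfig extends SchedulerMessage                               // upstream-style shape

  def handle(msg: SchedulerMessage): String = msg match {
    // A case class pattern with `_` matches any payload value.
    case RetrieveConfigWithArg(_) => "parameterized message, payload ignored"
    // A case object pattern matches the singleton itself.
    case RetrieveConfig => "bare message, no payload"
  }

  def main(args: Array[String]): Unit = {
    println(handle(RetrieveConfigWithArg(42))) // matches regardless of the argument
    println(handle(RetrieveConfig))
  }
}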

mllib/src/test/scala/org/apache/spark/ml/classification/MultilayerPerceptronClassifierSuite.scala

Lines changed: 1 addition & 5 deletions
@@ -84,9 +84,6 @@ class MultilayerPerceptronClassifierSuite
     }
   }
 
-<<<<<<< HEAD
-  ignore("Test setWeights by training restart -- ignore palantir/spark") {
-=======
   test("Predicted class probabilities: calibration on toy dataset") {
     val layers = Array[Int](4, 5, 2)
 
@@ -128,8 +125,7 @@ class MultilayerPerceptronClassifierSuite
     }
   }
 
-  test("Test setWeights by training restart") {
->>>>>>> origin/master
+  ignore("Test setWeights by training restart -- ignore palantir/spark") {
     val dataFrame = Seq(
       (Vectors.dense(0.0, 0.0), 0.0),
       (Vectors.dense(0.0, 1.0), 1.0),
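The resolution keeps origin/master's test body but restores the fork's ignore(...) wrapper so the suite skips it. In ScalaTest, ignore has the same signature as test, so swapping the one word disables a test while keeping it compiled and reported as ignored rather than silently dropped. A small self-contained sketch:

import org.scalatest.FunSuite

class ToggleExampleSuite extends FunSuite {
  test("runs and must pass") {
    assert(1 + 1 == 2)
  }

  // Same signature as `test`; the body still compiles but never runs,
  // and the runner reports this case as ignored.
  ignore("registered but skipped") {
    fail("never evaluated")
  }
}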

pom.xml

Lines changed: 0 additions & 3 deletions
@@ -193,10 +193,7 @@
     <jsr305.version>3.0.1</jsr305.version>
     <libthrift.version>0.9.3</libthrift.version>
     <antlr4.version>4.7</antlr4.version>
-<<<<<<< HEAD
     <antlr.version>3.4</antlr.version>
-=======
->>>>>>> origin/master
     <jpam.version>1.1</jpam.version>
     <selenium.version>2.52.0</selenium.version>
     <paranamer.version>2.6</paranamer.version>

sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4

Lines changed: 0 additions & 4 deletions
@@ -64,11 +64,7 @@ singleDataType
     : dataType EOF
     ;
 
-<<<<<<< HEAD
-standaloneColTypeList
-=======
 singleTableSchema
->>>>>>> origin/master
     : colTypeList EOF
     ;

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/AstBuilder.scala

Lines changed: 0 additions & 7 deletions
@@ -89,16 +89,9 @@ class AstBuilder(conf: SQLConf) extends SqlBaseBaseVisitor[AnyRef] with Logging
     visitSparkDataType(ctx.dataType)
   }
 
-<<<<<<< HEAD
-  override def visitStandaloneColTypeList(ctx: StandaloneColTypeListContext): Seq[StructField] =
-    withOrigin(ctx) {
-      visitColTypeList(ctx.colTypeList)
-    }
-=======
   override def visitSingleTableSchema(ctx: SingleTableSchemaContext): StructType = {
     withOrigin(ctx)(StructType(visitColTypeList(ctx.colTypeList)))
   }
->>>>>>> origin/master
 
   /* ********************************************************************************************
    * Plan parsing
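The two visitors walk the same colTypeList and differ only in where the Seq[StructField] gets wrapped into a StructType: inside the visitor (origin/master's visitSingleTableSchema) or at the call site (the fork's visitStandaloneColTypeList). StructType is just a wrapper over a field sequence, as this sketch shows (assumes spark-catalyst on the classpath):

import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

object SchemaWrapping {
  def main(args: Array[String]): Unit = {
    val fields: Seq[StructField] = Seq(
      StructField("name", StringType),
      StructField("age", IntegerType))
    // The wrapping step the two visitor signatures disagree on:
    val schema: StructType = StructType(fields)
    assert(schema.fieldNames.sameElements(Array("name", "age")))
  }
}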

sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParseDriver.scala

Lines changed: 0 additions & 4 deletions
@@ -61,11 +61,7 @@ abstract class AbstractSqlParser extends ParserInterface with Logging {
    * definitions which will preserve the correct Hive metadata.
    */
   override def parseTableSchema(sqlText: String): StructType = parse(sqlText) { parser =>
-<<<<<<< HEAD
-    StructType(astBuilder.visitStandaloneColTypeList(parser.standaloneColTypeList()))
-=======
     astBuilder.visitSingleTableSchema(parser.singleTableSchema())
->>>>>>> origin/master
   }
 
   /** Creates LogicalPlan for a given SQL string. */
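End to end, the resolved path is: parseTableSchema runs the generated singleTableSchema() entry rule, which is EOF-anchored and must consume the whole string, then hands the parse tree to visitSingleTableSchema. A hedged usage sketch, assuming the CatalystSqlParser object in this package is accessible from the calling code:

import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
import org.apache.spark.sql.types.StructType

object ParseSchemaExample {
  def main(args: Array[String]): Unit = {
    // Parses a bare column-type list into a schema via the entry point above.
    val schema: StructType = CatalystSqlParser.parseTableSchema("name STRING, age INT")
    println(schema.treeString)
    // Because singleTableSchema is EOF-anchored, trailing tokens such as
    // "name STRING extra" make this call throw a ParseException instead.
  }
}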

sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFilterSuite.scala

Lines changed: 2 additions & 30 deletions
@@ -57,7 +57,6 @@ class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContex
       expected: Seq[Row]): Unit = {
     val output = predicate.collect { case a: Attribute => a }.distinct
 
-<<<<<<< HEAD
     withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true",
       SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false",
       ParquetInputFormat.RECORD_FILTERING_ENABLED -> "true") {
@@ -67,7 +66,8 @@ class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContex
 
       var maybeRelation: Option[HadoopFsRelation] = None
       val maybeAnalyzedPredicate = query.queryExecution.optimizedPlan.collect {
-        case PhysicalOperation(_, filters, LogicalRelation(relation: HadoopFsRelation, _, _)) =>
+        case PhysicalOperation(_, filters,
+            LogicalRelation(relation: HadoopFsRelation, _, _, _)) =>
           maybeRelation = Some(relation)
           filters
       }.flatten.reduceLeftOption(_ && _)
@@ -83,34 +83,6 @@ class ParquetFilterSuite extends QueryTest with ParquetTest with SharedSQLContex
         assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $pred")
         // Doesn't bother checking type parameters here (e.g. `Eq[Integer]`)
         maybeFilter.exists(_.getClass === filterClass)
-=======
-    withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> "true") {
-      withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "false") {
-        val query = df
-          .select(output.map(e => Column(e)): _*)
-          .where(Column(predicate))
-
-        var maybeRelation: Option[HadoopFsRelation] = None
-        val maybeAnalyzedPredicate = query.queryExecution.optimizedPlan.collect {
-          case PhysicalOperation(_, filters,
-              LogicalRelation(relation: HadoopFsRelation, _, _, _)) =>
-            maybeRelation = Some(relation)
-            filters
-        }.flatten.reduceLeftOption(_ && _)
-        assert(maybeAnalyzedPredicate.isDefined, "No filter is analyzed from the given query")
-
-        val (_, selectedFilters, _) =
-          DataSourceStrategy.selectFilters(maybeRelation.get, maybeAnalyzedPredicate.toSeq)
-        assert(selectedFilters.nonEmpty, "No filter is pushed down")
-
-        selectedFilters.foreach { pred =>
-          val maybeFilter = ParquetFilters.createFilter(df.schema, pred)
-          assert(maybeFilter.isDefined, s"Couldn't generate filter predicate for $pred")
-          // Doesn't bother checking type parameters here (e.g. `Eq[Integer]`)
-          maybeFilter.exists(_.getClass === filterClass)
-        }
-        checker(stripSparkFilter(query), expected)
->>>>>>> origin/master
       }
       checker(stripSparkFilter(query), expected)
     }
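The kept HEAD branch folds all three settings into a single withSQLConf call where origin/master nested two; either way the helper's contract is the same: set the keys for the duration of the block, then restore the previous values. A sketch of that save/set/run/restore idea over a plain mutable map — an illustration, not Spark's actual implementation (the key strings mirror real SQLConf keys for flavor):

import scala.collection.mutable

object WithConfSketch {
  // Remember the old values, apply the overrides, run the body, then restore.
  def withConf(conf: mutable.Map[String, String])(pairs: (String, String)*)(body: => Unit): Unit = {
    val saved = pairs.map { case (k, _) => k -> conf.get(k) }
    pairs.foreach { case (k, v) => conf(k) = v }
    try body
    finally saved.foreach {
      case (k, Some(old)) => conf(k) = old
      case (k, None) => conf.remove(k)
    }
  }

  def main(args: Array[String]): Unit = {
    val conf = mutable.Map("spark.sql.parquet.filterPushdown" -> "false")
    withConf(conf)(
      "spark.sql.parquet.filterPushdown" -> "true",
      "spark.sql.parquet.enableVectorizedReader" -> "false") {
      assert(conf("spark.sql.parquet.filterPushdown") == "true")
    }
    assert(conf("spark.sql.parquet.filterPushdown") == "false")          // restored
    assert(!conf.contains("spark.sql.parquet.enableVectorizedReader"))  // removed again
  }
}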

sql/core/src/test/scala/org/apache/spark/sql/jdbc/JDBCWriteSuite.scala

Lines changed: 0 additions & 4 deletions
@@ -468,11 +468,7 @@ class JDBCWriteSuite extends SharedSQLContext with BeforeAndAfter {
       .option("createTableColumnTypes", "`name char(20)") // incorrectly quoted column
       .jdbc(url1, "TEST.USERDBTYPETEST", properties)
     }.getMessage()
-<<<<<<< HEAD
-    assert(msg.contains("extraneous input '`' expecting"))
-=======
     assert(msg.contains("extraneous input"))
->>>>>>> origin/master
   }
 
   test("SPARK-10849: jdbc CreateTableColumnTypes duplicate columns")
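The resolution keeps the looser origin/master assertion, matching only the stable "extraneous input" prefix rather than the fork's full ANTLR message, so the test survives parser message-format changes between branches. A self-contained sketch of the intercept-and-substring pattern; the exception and message here are stand-ins for the real parse failure:

import org.scalatest.FunSuite

class MessageAssertionSuite extends FunSuite {
  test("assert on a stable substring of an error message") {
    val msg = intercept[IllegalArgumentException] {
      // Stand-in for the JDBC write that fails to parse "`name char(20)".
      require(false, "extraneous input '`' expecting {'SELECT', ...}")
    }.getMessage
    // Matching only the stable prefix keeps the test robust when the
    // parser's expected-token list changes between versions.
    assert(msg.contains("extraneous input"))
  }
}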
