Skip to content

Commit 71251ea

Browse files
authored
Update diffs for Spark SQL core 2 tests. (apache#1641)
1 parent fdaec64 commit 71251ea

File tree

3 files changed

+87
-84
lines changed

3 files changed

+87
-84
lines changed

dev/diffs/3.4.3.diff

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
diff --git a/pom.xml b/pom.xml
2-
index d3544881af1..26ab186c65d 100644
2+
index d3544881af1..2860f13daf9 100644
33
--- a/pom.xml
44
+++ b/pom.xml
55
@@ -148,6 +148,8 @@
@@ -1893,22 +1893,23 @@ index 593bd7bb4ba..32af28b0238 100644
18931893
assert(shuffles2.size == 4)
18941894
val smj2 = findTopLevelSortMergeJoin(adaptive2)
18951895
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala
1896-
index bd9c79e5b96..ab7584e768e 100644
1896+
index bd9c79e5b96..2ada8c28842 100644
18971897
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala
18981898
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala
18991899
@@ -27,6 +27,7 @@ import org.apache.spark.sql.catalyst.SchemaPruningTest
19001900
import org.apache.spark.sql.catalyst.expressions.Concat
19011901
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
19021902
import org.apache.spark.sql.catalyst.plans.logical.Expand
1903-
+import org.apache.spark.sql.comet.CometScanExec
1903+
+import org.apache.spark.sql.comet.{CometNativeScanExec, CometScanExec}
19041904
import org.apache.spark.sql.execution.FileSourceScanExec
19051905
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
19061906
import org.apache.spark.sql.functions._
1907-
@@ -867,6 +868,7 @@ abstract class SchemaPruningSuite
1907+
@@ -867,6 +868,8 @@ abstract class SchemaPruningSuite
19081908
val fileSourceScanSchemata =
19091909
collect(df.queryExecution.executedPlan) {
19101910
case scan: FileSourceScanExec => scan.requiredSchema
19111911
+ case scan: CometScanExec => scan.requiredSchema
1912+
+ case scan: CometNativeScanExec => scan.requiredSchema
19121913
}
19131914
assert(fileSourceScanSchemata.size === expectedSchemaCatalogStrings.size,
19141915
s"Found ${fileSourceScanSchemata.size} file sources in dataframe, " +

dev/diffs/3.5.4.diff

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
diff --git a/pom.xml b/pom.xml
2-
index 8dc47f391f9..8a3e72133a8 100644
2+
index 8dc47f391f9..94cd1cee609 100644
33
--- a/pom.xml
44
+++ b/pom.xml
55
@@ -152,6 +152,8 @@
@@ -1910,22 +1910,23 @@ index 05872d41131..a2c328b9742 100644
19101910
import FileFormat.{FILE_NAME, FILE_SIZE}
19111911
import ParquetFileFormat.ROW_INDEX
19121912
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala
1913-
index bf496d6db21..1e92016830f 100644
1913+
index bf496d6db21..9bb57a9b4c6 100644
19141914
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala
19151915
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/SchemaPruningSuite.scala
19161916
@@ -28,6 +28,7 @@ import org.apache.spark.sql.catalyst.expressions.Concat
19171917
import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
19181918
import org.apache.spark.sql.catalyst.plans.logical.Expand
19191919
import org.apache.spark.sql.catalyst.types.DataTypeUtils
1920-
+import org.apache.spark.sql.comet.CometScanExec
1920+
+import org.apache.spark.sql.comet.{CometNativeScanExec, CometScanExec}
19211921
import org.apache.spark.sql.execution.FileSourceScanExec
19221922
import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
19231923
import org.apache.spark.sql.functions._
1924-
@@ -868,6 +869,7 @@ abstract class SchemaPruningSuite
1924+
@@ -868,6 +869,8 @@ abstract class SchemaPruningSuite
19251925
val fileSourceScanSchemata =
19261926
collect(df.queryExecution.executedPlan) {
19271927
case scan: FileSourceScanExec => scan.requiredSchema
19281928
+ case scan: CometScanExec => scan.requiredSchema
1929+
+ case scan: CometNativeScanExec => scan.requiredSchema
19291930
}
19301931
assert(fileSourceScanSchemata.size === expectedSchemaCatalogStrings.size,
19311932
s"Found ${fileSourceScanSchemata.size} file sources in dataframe, " +

0 commit comments

Comments (0)