 diff --git a/pom.xml b/pom.xml
-index 68e2c422a24..d971894ffe6 100644
+index a0e25ce4d8d..7db86212507 100644
 --- a/pom.xml
 +++ b/pom.xml
 @@ -152,6 +152,8 @@
@@ -38,7 +38,7 @@ index 68e2c422a24..d971894ffe6 100644
  </dependencyManagement>

 diff --git a/sql/core/pom.xml b/sql/core/pom.xml
-index f08b33575fc..424e0da32fd 100644
+index e3d324c8edb..22342150522 100644
 --- a/sql/core/pom.xml
 +++ b/sql/core/pom.xml
 @@ -77,6 +77,10 @@
@@ -216,7 +216,7 @@ index 0efe0877e9b..423d3b3d76d 100644
  -- SELECT_HAVING
  -- https://github.com/postgres/postgres/blob/REL_12_BETA2/src/test/regress/sql/select_having.sql
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
-index 9815cb816c9..95b5f9992b0 100644
+index e5494726695..00937f025c2 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/CachedTableSuite.scala
 @@ -38,7 +38,7 @@ import org.apache.spark.sql.catalyst.util.DateTimeConstants
@@ -239,7 +239,7 @@ index 9815cb816c9..95b5f9992b0 100644

  test("A cached table preserves the partitioning and ordering of its cached SparkPlan") {
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
-index 5a8681aed97..da9d25e2eb4 100644
+index 6f3090d8908..c08a60fb0c2 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameAggregateSuite.scala
 @@ -28,7 +28,7 @@ import org.apache.spark.sql.catalyst.plans.logical.Expand
@@ -336,7 +336,7 @@ index 7ee18df3756..d09f70e5d99 100644
  assert(exchanges.size == 2)
  }
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala
-index 47a311c71d5..342e71cfdd4 100644
+index a1d5d579338..c201d39cc78 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/DataFrameWindowFunctionsSuite.scala
 @@ -24,8 +24,9 @@ import org.apache.spark.sql.catalyst.expressions.{AttributeReference, Expression
@@ -624,7 +624,7 @@ index 7af826583bd..3c3def1eb67 100644
  assert(shuffleMergeJoins.size == 1)
  }
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
-index 4d256154c85..66a5473852d 100644
+index 44c8cb92fc3..f098beeca26 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/JoinSuite.scala
 @@ -31,7 +31,8 @@ import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
@@ -822,7 +822,7 @@ index 4d256154c85..66a5473852d 100644
  checkAnswer(fullJoinDF, Row(100))
  }
  }
-@@ -1583,6 +1612,9 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan
+@@ -1611,6 +1640,9 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan
  Seq(semiJoinDF, antiJoinDF).foreach { df =>
  assert(collect(df.queryExecution.executedPlan) {
  case j: ShuffledHashJoinExec if j.ignoreDuplicatedKey == ignoreDuplicatedKey => true
@@ -832,7 +832,7 @@ index 4d256154c85..66a5473852d 100644
  }.size == 1)
  }
  }
-@@ -1627,14 +1659,20 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan
+@@ -1655,14 +1687,20 @@ class JoinSuite extends QueryTest with SharedSparkSession with AdaptiveSparkPlan

  test("SPARK-43113: Full outer join with duplicate stream-side references in condition (SMJ)") {
  def check(plan: SparkPlan): Unit = {
@@ -855,7 +855,7 @@ index 4d256154c85..66a5473852d 100644
  }
  dupStreamSideColTest("SHUFFLE_HASH", check)
  }
-@@ -1770,7 +1808,8 @@ class ThreadLeakInSortMergeJoinSuite
+@@ -1798,7 +1836,8 @@ class ThreadLeakInSortMergeJoinSuite
  sparkConf.set(SHUFFLE_SPILL_NUM_ELEMENTS_FORCE_SPILL_THRESHOLD, 20))
  }

@@ -879,7 +879,7 @@ index c26757c9cff..d55775f09d7 100644
  protected val baseResourcePath = {
  // use the same way as `SQLQueryTestSuite` to get the resource path
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
-index 793a0da6a86..181bfc16e4b 100644
+index 3cf2bfd17ab..49728c35c42 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
 @@ -1521,7 +1521,8 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
@@ -2050,10 +2050,10 @@ index 8e88049f51e..8f3cf8a0f80 100644
  case _ =>
  throw new AnalysisException("Can not match ParquetTable in the query.")
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
-index 4f8a9e39716..fb55ac7a955 100644
+index 8ed9ef1630e..eed2a6f5ad5 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala
-@@ -1335,7 +1335,8 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession
+@@ -1345,7 +1345,8 @@ class ParquetIOSuite extends QueryTest with ParquetTest with SharedSparkSession
  }
  }

@@ -2736,7 +2736,7 @@ index abe606ad9c1..2d930b64cca 100644
  val tblTargetName = "tbl_target"
  val tblSourceQualified = s"default.$tblSourceName"
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala
-index e937173a590..ca06132102d 100644
+index e937173a590..15feb013bae 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala
 @@ -27,6 +27,7 @@ import scala.concurrent.duration._
@@ -2785,7 +2785,7 @@ index e937173a590..ca06132102d 100644
  }
  }

-@@ -242,6 +265,29 @@ private[sql] trait SQLTestUtilsBase
+@@ -242,6 +265,20 @@ private[sql] trait SQLTestUtilsBase
  protected override def _sqlContext: SQLContext = self.spark.sqlContext
  }

@@ -2795,15 +2795,6 @@ index e937173a590..ca06132102d 100644
 + protected def isCometEnabled: Boolean = SparkSession.isCometEnabled
 +
 + /**
-+ * Whether to enable ansi mode This is only effective when
-+ * [[isCometEnabled]] returns true.
-+ */
-+ protected def enableCometAnsiMode: Boolean = {
-+ val v = System.getenv("ENABLE_COMET_ANSI_MODE")
-+ v != null && v.toBoolean
-+ }
-+
-+ /**
 + * Whether Spark should only apply Comet scan optimization. This is only effective when
 + * [[isCometEnabled]] returns true.
 + */
@@ -2815,7 +2806,7 @@ index e937173a590..ca06132102d 100644
  protected override def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = {
  SparkSession.setActiveSession(spark)
  super.withSQLConf(pairs: _*)(f)
-@@ -435,6 +481,8 @@ private[sql] trait SQLTestUtilsBase
+@@ -435,6 +472,8 @@ private[sql] trait SQLTestUtilsBase
  val schema = df.schema
  val withoutFilters = df.queryExecution.executedPlan.transform {
  case FilterExec(_, child) => child
@@ -2825,10 +2816,10 @@ index e937173a590..ca06132102d 100644

  spark.internalCreateDataFrame(withoutFilters.execute(), schema)
 diff --git a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala b/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala
-index ed2e309fa07..a5ea58146ad 100644
+index ed2e309fa07..81ae825ca79 100644
 --- a/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala
 +++ b/sql/core/src/test/scala/org/apache/spark/sql/test/SharedSparkSession.scala
-@@ -74,6 +74,31 @@ trait SharedSparkSessionBase
+@@ -74,6 +74,20 @@ trait SharedSparkSessionBase
  // this rule may potentially block testing of other optimization rules such as
  // ConstantPropagation etc.
  .set(SQLConf.OPTIMIZER_EXCLUDED_RULES.key, ConvertToLocalRelation.ruleName)
@@ -2838,24 +2829,13 @@ index ed2e309fa07..a5ea58146ad 100644
 + .set("spark.sql.extensions", "org.apache.comet.CometSparkSessionExtensions")
 + .set("spark.comet.enabled", "true")
 + .set("spark.comet.parquet.respectFilterPushdown", "true")
-+
-+ if (!isCometScanOnly) {
-+ conf
-+ .set("spark.comet.exec.enabled", "true")
-+ .set("spark.shuffle.manager",
-+ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager")
-+ .set("spark.comet.exec.shuffle.enabled", "true")
-+ .set("spark.comet.memoryOverhead", "10g")
-+ } else {
-+ conf
-+ .set("spark.comet.exec.enabled", "false")
-+ .set("spark.comet.exec.shuffle.enabled", "false")
-+ }
-+
-+ if (enableCometAnsiMode) {
-+ conf
-+ .set("spark.sql.ansi.enabled", "true")
-+ }
++ .set("spark.comet.sparkToColumnar.enabled", "true")
++ .set("spark.comet.sparkToColumnar.supportedOperatorList", "LocalTableScan")
++ .set("spark.comet.exec.enabled", "true")
++ .set("spark.shuffle.manager",
++ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager")
++ .set("spark.comet.exec.shuffle.enabled", "true")
++ .set("spark.comet.memoryOverhead", "10g")
 + }
  conf.set(
  StaticSQLConf.WAREHOUSE_PATH,
@@ -2989,10 +2969,10 @@ index 6160c3e5f6c..0956d7d9edc 100644

  test("SPARK-4963 DataFrame sample on mutable row return wrong result") {
 diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala
-index 1d646f40b3e..5babe505301 100644
+index 1d646f40b3e..dde4a3b516f 100644
 --- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala
 +++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/test/TestHive.scala
-@@ -53,25 +53,54 @@ object TestHive
+@@ -53,25 +53,41 @@ object TestHive
  new SparkContext(
  System.getProperty("spark.sql.test.master", "local[1]"),
  "TestSQLContext",
@@ -3038,25 +3018,12 @@ index 1d646f40b3e..5babe505301 100644
 + conf
 + .set("spark.sql.extensions", "org.apache.comet.CometSparkSessionExtensions")
 + .set("spark.comet.enabled", "true")
-+
-+ val v = System.getenv("ENABLE_COMET_SCAN_ONLY")
-+ if (v == null || !v.toBoolean) {
-+ conf
-+ .set("spark.comet.exec.enabled", "true")
-+ .set("spark.shuffle.manager",
-+ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager")
-+ .set("spark.comet.exec.shuffle.enabled", "true")
-+ } else {
-+ conf
-+ .set("spark.comet.exec.enabled", "false")
-+ .set("spark.comet.exec.shuffle.enabled", "false")
-+ }
-+
-+ val a = System.getenv("ENABLE_COMET_ANSI_MODE")
-+ if (a != null && a.toBoolean) {
-+ conf
-+ .set("spark.sql.ansi.enabled", "true")
-+ }
++ .set("spark.comet.exec.enabled", "true")
++ .set("spark.comet.sparkToColumnar.enabled", "true")
++ .set("spark.comet.sparkToColumnar.supportedOperatorList", "LocalTableScan")
++ .set("spark.shuffle.manager",
++ "org.apache.spark.sql.comet.execution.shuffle.CometShuffleManager")
++ .set("spark.comet.exec.shuffle.enabled", "true")
 + }

 + conf