Commit fa27427

fix: use spark.comet.batchSize instead of conf.arrowMaxRecordsPerBatch for data that is coming from Java (#2196)
Parent: b51e175

1 file changed (+2 additions, -2 deletions)

1 file changed

+2
-2
lines changed

spark/src/main/scala/org/apache/spark/sql/comet/CometSparkToColumnarExec.scala

Lines changed: 2 additions & 2 deletions
@@ -33,7 +33,7 @@ import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
 import org.apache.spark.sql.types._
 import org.apache.spark.sql.vectorized.ColumnarBatch
 
-import org.apache.comet.DataTypeSupport
+import org.apache.comet.{CometConf, DataTypeSupport}
 
 case class CometSparkToColumnarExec(child: SparkPlan)
     extends RowToColumnarTransition
@@ -94,7 +94,7 @@ case class CometSparkToColumnarExec(child: SparkPlan)
     val numInputRows = longMetric("numInputRows")
     val numOutputBatches = longMetric("numOutputBatches")
     val conversionTime = longMetric("conversionTime")
-    val maxRecordsPerBatch = conf.arrowMaxRecordsPerBatch
+    val maxRecordsPerBatch = CometConf.COMET_BATCH_SIZE.get(conf)
     val timeZoneId = conf.sessionLocalTimeZone
     val schema = child.schema

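For context, a minimal sketch of why the two keys differ: spark.sql.execution.arrow.maxRecordsPerBatch governs Spark's own Arrow conversion, while spark.comet.batchSize is Comet's batch-size setting, which this commit makes CometSparkToColumnarExec honor for rows arriving from the Java side. The local SparkSession setup and the default values below are illustrative assumptions, not values taken from the Comet source:

// Sketch contrasting the two settings the commit swaps. Defaults ("10000",
// "8192") and the session setup are assumptions for illustration only.
import org.apache.spark.sql.SparkSession

object BatchSizeSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession
      .builder()
      .master("local[1]")
      .appName("batch-size-sketch")
      .config("spark.comet.batchSize", "8192") // hypothetical value for illustration
      .getOrCreate()

    // Before the fix: the operator sized its batches from Spark's Arrow
    // setting, which controls Spark's own Arrow conversion, not Comet's
    // native batches.
    val arrowMax =
      spark.conf.get("spark.sql.execution.arrow.maxRecordsPerBatch", "10000")

    // After the fix: the operator reads Comet's dedicated key (the one
    // CometConf.COMET_BATCH_SIZE wraps), so batches built from Java-side
    // rows match the size Comet's native side expects.
    val cometBatch = spark.conf.get("spark.comet.batchSize", "8192")

    println(s"arrowMaxRecordsPerBatch=$arrowMax cometBatchSize=$cometBatch")
    spark.stop()
  }
}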