@@ -243,40 +243,43 @@ object SQLConf {
 
   val PREFER_COLUMN_OVER_LCA_IN_ARRAY_INDEX =
     buildConf("spark.sql.analyzer.preferColumnOverLcaInArrayIndex")
-      .internal()
-      .doc(
-        "When true, prefer the column from the underlying relation over the lateral column alias " +
-        "reference with the same name (see SPARK-53734)."
-      )
-      .booleanConf
-      .createWithDefault(true)
+      .internal()
+      .version("4.1.0")
+      .doc(
+        "When true, prefer the column from the underlying relation over the lateral column alias " +
+        "reference with the same name (see SPARK-53734).")
+      .booleanConf
+      .createWithDefault(true)
 
   val DONT_DEDUPLICATE_EXPRESSION_IF_EXPR_ID_IN_OUTPUT =
     buildConf("spark.sql.analyzer.dontDeduplicateExpressionIfExprIdInOutput")
-      .internal()
-      .doc(
-        "DeduplicateRelations shouldn't remap expressions to new ExprIds if old ExprId still " +
-        "exists in output.")
-      .booleanConf
-      .createWithDefault(true)
+      .internal()
+      .version("4.1.0")
+      .doc(
+        "DeduplicateRelations shouldn't remap expressions to new ExprIds if old ExprId still " +
+        "exists in output.")
+      .booleanConf
+      .createWithDefault(true)
 
   val UNION_IS_RESOLVED_WHEN_DUPLICATES_PER_CHILD_RESOLVED =
     buildConf("spark.sql.analyzer.unionIsResolvedWhenDuplicatesPerChildResolved")
-      .internal()
-      .doc(
-        "When true, union should only be resolved once there are no duplicate attributes in " +
-        "each branch.")
-      .booleanConf
-      .createWithDefault(true)
+      .internal()
+      .version("4.1.0")
+      .doc(
+        "When true, union should only be resolved once there are no duplicate attributes in " +
+        "each branch.")
+      .booleanConf
+      .createWithDefault(true)
 
   val ONLY_NECESSARY_AND_UNIQUE_METADATA_COLUMNS =
     buildConf("spark.sql.analyzer.uniqueNecessaryMetadataColumns")
-      .internal()
-      .doc(
-        "When this conf is enabled, AddMetadataColumns rule should only add necessary metadata " +
-        "columns and only if those columns are not already present in the project list.")
-      .booleanConf
-      .createWithDefault(true)
+      .internal()
+      .version("4.1.0")
+      .doc(
+        "When this conf is enabled, AddMetadataColumns rule should only add necessary metadata " +
+        "columns and only if those columns are not already present in the project list.")
+      .booleanConf
+      .createWithDefault(true)
 
   val BLOCK_CREATE_TEMP_TABLE_USING_PROVIDER =
     buildConf("spark.sql.legacy.blockCreateTempTableUsingProvider")
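A minimal usage sketch (not part of the patch) of how one of the new internal analyzer flags above could be toggled for testing; the `spark` session value and the chosen flag are illustrative:

```scala
// Illustrative only: fall back to the previous resolution behavior by disabling
// the new flag on an existing SparkSession.
spark.conf.set("spark.sql.analyzer.preferColumnOverLcaInArrayIndex", "false")

// Analyzer code would typically read it through SQLConf, e.g.
// conf.getConf(SQLConf.PREFER_COLUMN_OVER_LCA_IN_ARRAY_INDEX)
```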
@@ -324,7 +327,7 @@ object SQLConf {
         " (AliasResolution.resolve, FunctionResolution.resolveFunction, etc)." +
         " This feature is currently under development."
       )
-      .version("4.0.0")
+      .version("4.1.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -1057,7 +1060,7 @@ object SQLConf {
         "An object with an explicitly set collation will not inherit the collation from the " +
         "schema."
       )
-      .version("4.0.0")
+      .version("4.1.0")
       .booleanConf
       .createWithDefault(false)
 
@@ -1911,6 +1914,7 @@ object SQLConf {
   val DATA_SOURCE_V2_JOIN_PUSHDOWN =
     buildConf("spark.sql.optimizer.datasourceV2JoinPushdown")
       .internal()
+      .version("4.1.0")
       .doc("When this config is set to true, join is tried to be pushed down" +
         " for DSv2 data sources in V2ScanRelationPushdown optimization rule.")
       .booleanConf
@@ -1919,6 +1923,7 @@ object SQLConf {
   val DATA_SOURCE_V2_EXPR_FOLDING =
     buildConf("spark.sql.optimizer.datasourceV2ExprFolding")
       .internal()
+      .version("4.1.0")
       .doc("When this config is set to true, do safe constant folding for the " +
         "expressions before translation and pushdown.")
       .booleanConf
@@ -2549,6 +2554,7 @@ object SQLConf {
   val STATE_STORE_MAINTENANCE_SHUTDOWN_TIMEOUT =
     buildConf("spark.sql.streaming.stateStore.maintenanceShutdownTimeout")
       .internal()
+      .version("4.1.0")
       .doc("Timeout in seconds for maintenance pool operations to complete on shutdown")
       .timeConf(TimeUnit.SECONDS)
       .createWithDefault(300L)
@@ -2565,6 +2571,7 @@ object SQLConf {
   val STATE_STORE_MAINTENANCE_PROCESSING_TIMEOUT =
     buildConf("spark.sql.streaming.stateStore.maintenanceProcessingTimeout")
       .internal()
+      .version("4.1.0")
       .doc("Timeout in seconds to wait for maintenance to process this partition.")
       .timeConf(TimeUnit.SECONDS)
       .createWithDefault(30L)
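A hedged sketch (again not part of the diff) of how the two maintenance timeouts above could be raised for a job whose state-store maintenance is slow; the values are arbitrary examples:

```scala
// Illustrative only: defaults introduced here are 300s (shutdown) and 30s (per partition).
spark.conf.set("spark.sql.streaming.stateStore.maintenanceShutdownTimeout", "600")
spark.conf.set("spark.sql.streaming.stateStore.maintenanceProcessingTimeout", "60")
```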
@@ -2757,6 +2764,7 @@ object SQLConf {
         "Note: For structured streaming, this configuration cannot be changed between query " +
         "restarts from the same checkpoint location.")
       .internal()
+      .version("4.1.0")
       .intConf
       .checkValue(_ > 0,
         "The value of spark.sql.streaming.internal.stateStore.partitions must be a positive " +
@@ -3575,7 +3583,7 @@ object SQLConf {
       .doc("When true, if a microbatch is retried, if a file already exists but its checksum " +
         "file does not exist, the file checksum will not be created. This is useful for " +
         "compatibility with files created before file checksums were enabled.")
-      .version("4.2.0")
+      .version("4.1.0")
       .booleanConf
       .createWithDefault(true)
 
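The hunks share one pattern: each config entry is tagged with `.version("4.1.0")`, the release it is introduced in. A minimal template of that builder chain (the key and entry name below are hypothetical, not from the patch):

```scala
// Illustrative template for an internal boolean SQLConf entry carrying its
// introduction version; mirrors the builder chain used throughout this diff.
val SOME_INTERNAL_FLAG =
  buildConf("spark.sql.some.illustrativeFlag") // hypothetical key
    .internal()
    .version("4.1.0")
    .doc("Short description of the behavior controlled by the flag.")
    .booleanConf
    .createWithDefault(true)
```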