Skip to content

Commit cdcb5bf

Browse files
committed
fix
1 parent a787f52 commit cdcb5bf

File tree

4 files changed

+7
-7
lines changed

4 files changed

+7
-7
lines changed

docs/layouts/shortcodes/generated/core_configuration.html

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -201,7 +201,7 @@
201201
<td>Whether to enable incremental clustering.</td>
202202
</tr>
203203
<tr>
204-
<td><h5>clustering.pre-write.enabled</h5></td>
204+
<td><h5>clustering.incremental.optimize-write</h5></td>
205205
<td style="word-wrap: break-word;">false</td>
206206
<td>Boolean</td>
207207
<td>Whether to perform clustering before the write phase when incremental clustering is enabled.</td>

paimon-api/src/main/java/org/apache/paimon/CoreOptions.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2071,8 +2071,8 @@ public InlineElement getDescription() {
20712071
"The duration after which a partition without new updates is considered a historical partition. "
20722072
+ "Historical partitions will be automatically fully clustered during the cluster operation.");
20732073

2074-
public static final ConfigOption<Boolean> CLUSTERING_PRE_WRITE_ENABLED =
2075-
key("clustering.pre-write.enabled")
2074+
public static final ConfigOption<Boolean> CLUSTERING_INCREMENTAL_OPTIMIZE_WRITE =
2075+
key("clustering.incremental.optimize-write")
20762076
.booleanType()
20772077
.defaultValue(false)
20782078
.withDescription(
@@ -3410,7 +3410,7 @@ public boolean clusteringIncrementalEnabled() {
34103410
}
34113411

34123412
public boolean preClusteringEnabled() {
3413-
return options.get(CLUSTERING_PRE_WRITE_ENABLED);
3413+
return options.get(CLUSTERING_INCREMENTAL_OPTIMIZE_WRITE);
34143414
}
34153415

34163416
public boolean bucketClusterEnabled() {

paimon-flink/paimon-flink-common/src/main/java/org/apache/paimon/flink/sink/FlinkTableSinkBase.java

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@
4141
import static org.apache.paimon.CoreOptions.CHANGELOG_PRODUCER;
4242
import static org.apache.paimon.CoreOptions.CLUSTERING_COLUMNS;
4343
import static org.apache.paimon.CoreOptions.CLUSTERING_INCREMENTAL;
44-
import static org.apache.paimon.CoreOptions.CLUSTERING_PRE_WRITE_ENABLED;
44+
import static org.apache.paimon.CoreOptions.CLUSTERING_INCREMENTAL_OPTIMIZE_WRITE;
4545
import static org.apache.paimon.CoreOptions.CLUSTERING_STRATEGY;
4646
import static org.apache.paimon.CoreOptions.MERGE_ENGINE;
4747
import static org.apache.paimon.flink.FlinkConnectorOptions.CLUSTERING_SAMPLE_FACTOR;
@@ -124,7 +124,7 @@ public SinkRuntimeProvider getSinkRuntimeProvider(Context context) {
124124
dataStream.getExecutionEnvironment(),
125125
dataStream.getTransformation()));
126126
if (!conf.get(CLUSTERING_INCREMENTAL)
127-
|| conf.get(CLUSTERING_PRE_WRITE_ENABLED)) {
127+
|| conf.get(CLUSTERING_INCREMENTAL_OPTIMIZE_WRITE)) {
128128
builder.clusteringIfPossible(
129129
conf.get(CLUSTERING_COLUMNS),
130130
conf.get(CLUSTERING_STRATEGY),

paimon-flink/paimon-flink-common/src/test/java/org/apache/paimon/flink/RangePartitionAndSortForAppendTableITCase.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -258,7 +258,7 @@ public void testClusteringPreWriteEnabled() throws Exception {
258258
batchSql(
259259
"INSERT INTO test_table /*+ OPTIONS('sink.clustering.by-columns' = 'col1', "
260260
+ "'sink.parallelism' = '10', 'sink.clustering.strategy' = 'zorder', "
261-
+ "'clustering.incremental' = 'true', 'clustering.pre-write.enabled' = 'true') */ "
261+
+ "'clustering.incremental' = 'true', 'clustering.incremental.optimize-write' = 'true') */ "
262262
+ "SELECT * FROM test_source");
263263
List<Row> sinkRows = batchSql("SELECT * FROM test_table");
264264
assertThat(sinkRows.size()).isEqualTo(SINK_ROW_NUMBER);

0 commit comments

Comments
 (0)