Skip to content

Commit d130ec7

Browse files
committed
resolve merge conflicts for HUDI-9667
1 parent df4e4a0 commit d130ec7

File tree

4 files changed

+17
-20
lines changed

4 files changed

+17
-20
lines changed

hudi-client/hudi-client-common/src/main/java/org/apache/hudi/table/action/rollback/RestorePlanActionExecutor.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,7 @@ public RestorePlanActionExecutor(HoodieEngineContext context,
7070
public Option<HoodieRestorePlan> execute() {
7171
final HoodieInstant restoreInstant = instantGenerator.createNewInstant(HoodieInstant.State.REQUESTED, HoodieTimeline.RESTORE_ACTION, instantTime);
7272
HoodieTableMetaClient metaClient = table.getMetaClient();
73-
try (CompletionTimeQueryView completionTimeQueryView = metaClient.getTableFormat().getTimelineFactory().createCompletionTimeQueryView(metaClient)) {
73+
try (CompletionTimeQueryView completionTimeQueryView = metaClient.getTimelineLayout().getTimelineFactory().createCompletionTimeQueryView(metaClient)) {
7474
// Get all the commits on the timeline after the provided commit time
7575
// rollback pending clustering instants first before other instants (See HUDI-3362)
7676
List<HoodieInstant> pendingClusteringInstantsToRollback = table.getActiveTimeline().filterPendingReplaceOrClusteringTimeline()

hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/client/functional/TestHoodieFileSystemViews.java

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -190,7 +190,7 @@ private void assertFileSystemViews(HoodieWriteConfig writeConfig, boolean enable
190190
}
191191
}
192192

193-
private void assertForFSVEquality(HoodieTableFileSystemView fsv1, HoodieTableFileSystemView fsv2, boolean enableMdt) {
193+
public static void assertForFSVEquality(HoodieTableFileSystemView fsv1, HoodieTableFileSystemView fsv2, boolean enableMdt) {
194194
List<String> allPartitionNames = Arrays.asList(DEFAULT_FIRST_PARTITION_PATH, DEFAULT_SECOND_PARTITION_PATH, DEFAULT_THIRD_PARTITION_PATH);
195195
fsv1.loadPartitions(allPartitionNames);
196196
if (enableMdt) {
@@ -239,7 +239,7 @@ static void assertBaseFileEquality(HoodieBaseFile baseFile1, HoodieBaseFile base
239239
assertEquals(baseFile1.getFileSize(), baseFile2.getFileSize());
240240
}
241241

242-
private void assertFileSliceListEquality(List<FileSlice> fileSlices1, List<FileSlice> fileSlices2) {
242+
static void assertFileSliceListEquality(List<FileSlice> fileSlices1, List<FileSlice> fileSlices2) {
243243
assertEquals(fileSlices1.size(), fileSlices1.size());
244244
Map<Pair<String, String>, FileSlice> fileNameToFileSliceMap1 = new HashMap<>();
245245
fileSlices1.forEach(entry -> {
@@ -255,7 +255,7 @@ private void assertFileSliceListEquality(List<FileSlice> fileSlices1, List<FileS
255255
});
256256
}
257257

258-
private void assertFileSliceEquality(FileSlice fileSlice1, FileSlice fileSlice2) {
258+
static void assertFileSliceEquality(FileSlice fileSlice1, FileSlice fileSlice2) {
259259
assertEquals(fileSlice1.getBaseFile().isPresent(), fileSlice2.getBaseFile().isPresent());
260260
if (fileSlice1.getBaseFile().isPresent()) {
261261
assertBaseFileEquality(fileSlice1.getBaseFile().get(), fileSlice2.getBaseFile().get());

hudi-client/hudi-spark-client/src/test/java/org/apache/hudi/testutils/HoodieClientTestBase.java

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -623,8 +623,7 @@ protected void updateBatchWithoutCommit(String newCommitTime, List<HoodieRecord>
623623
.withAutoUpgradeVersion(false)
624624
.withWriteTableVersion(tableVersion.versionCode())
625625
.withMetadataConfig(HoodieMetadataConfig.newBuilder()
626-
.withStreamingWriteEnabled(tableVersion.greaterThanOrEquals(HoodieTableVersion.EIGHT))
627-
.build())
626+
.build())
628627
.build();
629628

630629
try (SparkRDDWriteClient client = getHoodieWriteClient(hoodieWriteConfig)) {

hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/client/functional/TestSavepointRestoreMergeOnRead.java

Lines changed: 12 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
import org.apache.hudi.common.config.HoodieMetadataConfig;
2525
import org.apache.hudi.common.fs.FSUtils;
2626
import org.apache.hudi.common.function.SerializableFunctionUnchecked;
27+
import org.apache.hudi.common.model.HoodieCommitMetadata;
2728
import org.apache.hudi.common.model.HoodieFailedWritesCleaningPolicy;
2829
import org.apache.hudi.common.model.HoodieRecord;
2930
import org.apache.hudi.common.model.HoodieTableType;
@@ -42,6 +43,7 @@
4243
import org.apache.hudi.metadata.HoodieTableMetadata;
4344
import org.apache.hudi.storage.StoragePath;
4445
import org.apache.hudi.storage.StoragePathFilter;
46+
import org.apache.hudi.table.action.HoodieWriteMetadata;
4547
import org.apache.hudi.testutils.HoodieClientTestBase;
4648

4749
import org.apache.spark.api.java.JavaRDD;
@@ -62,8 +64,8 @@
6264
import java.util.stream.Collectors;
6365
import java.util.stream.Stream;
6466

67+
import static org.apache.hudi.client.functional.TestHoodieFileSystemViews.assertForFSVEquality;
6568
import static org.apache.hudi.common.table.timeline.HoodieTimeline.DELTA_COMMIT_ACTION;
66-
import static org.apache.hudi.functional.TestHoodieFileSystemViews.assertForFSVEquality;
6769
import static org.apache.hudi.testutils.Assertions.assertNoWriteErrors;
6870
import static org.junit.jupiter.api.Assertions.assertEquals;
6971
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -249,7 +251,6 @@ public void testRestoreWithFileGroupCreatedWithDeltaCommits(HoodieTableVersion t
249251
.withAutoUpgradeVersion(false)
250252
.withWriteTableVersion(tableVersion.versionCode())
251253
.withMetadataConfig(HoodieMetadataConfig.newBuilder()
252-
.withStreamingWriteEnabled(tableVersion.greaterThanOrEquals(HoodieTableVersion.EIGHT))
253254
.build())
254255
.build();
255256

@@ -420,7 +421,7 @@ void testCleaningPendingCompaction(HoodieTableVersion tableVersion) throws Excep
420421
* No files will be cleaned up. Only rollback log appends.
421422
*/
422423
@ParameterizedTest
423-
@EnumSource(value = HoodieTableVersion.class, names = {"SIX", "NINE"})
424+
@EnumSource(value = HoodieTableVersion.class, names = {"SIX", "EIGHT"})
424425
void testCleaningCompletedRollback(HoodieTableVersion tableVersion) throws Exception {
425426
HoodieWriteConfig hoodieWriteConfig = getHoodieWriteConfigAndInitializeTable(HoodieCompactionConfig.newBuilder()
426427
.withMaxNumDeltaCommitsBeforeCompaction(3) // the 3rd delta_commit triggers compaction
@@ -456,7 +457,7 @@ void testCleaningCompletedRollback(HoodieTableVersion tableVersion) throws Excep
456457

457458
assertRowNumberEqualsTo(20);
458459
// write a delta_commit but does not commit
459-
updateBatchWithoutCommit(WriteClientTestUtils.createNewInstantTime(),
460+
updateBatchWithoutCommit(client.createNewInstantTime(),
460461
Objects.requireNonNull(baseRecordsToUpdate, "The records to update should not be null"), tableVersion);
461462
// rollback the delta_commit
462463
assertTrue(writeClient.rollbackFailedWrites(metaClient), "The last delta_commit should be rolled back");
@@ -485,7 +486,7 @@ void rollbackWithAsyncServices_compactionCompletesDuringCommit() {
485486
// Run compaction while delta-commit is in-flight
486487
Option<String> compactionInstant = client.scheduleCompaction(Option.empty());
487488
HoodieWriteMetadata result = client.compact(compactionInstant.get());
488-
client.commitCompaction(compactionInstant.get(), result, Option.empty());
489+
client.commitCompaction(compactionInstant.get(), (HoodieCommitMetadata) result.getCommitMetadata().get(), Option.empty());
489490
// commit the inflight delta commit
490491
client.commit(inflightCommit, writeStatus);
491492

@@ -522,7 +523,7 @@ void rollbackWithAsyncServices_commitCompletesDuringCompaction() {
522523
// commit the inflight delta commit
523524
client.commit(inflightCommit, writeStatus);
524525
// commit the compaction instant after the delta commit
525-
client.commitCompaction(compactionInstant.get(), result, Option.empty());
526+
client.commitCompaction(compactionInstant.get(), (HoodieCommitMetadata) result.getCommitMetadata().get(), Option.empty());
526527

527528
client.savepoint(inflightCommit, "user1", "Savepoint for commit that completed during compaction");
528529

@@ -543,7 +544,7 @@ void rollbackWithAsyncServices_commitCompletesDuringCompaction() {
543544
}
544545

545546
@ParameterizedTest
546-
@EnumSource(value = HoodieTableVersion.class, names = {"SIX", "NINE"})
547+
@EnumSource(value = HoodieTableVersion.class, names = {"SIX", "EIGHT"})
547548
void rollbackWithAsyncServices_commitStartsAndFinishesDuringCompaction(HoodieTableVersion tableVersion) {
548549
HoodieWriteConfig hoodieWriteConfig = getHoodieWriteConfigWithCompactionAndConcurrencyControl(tableVersion);
549550
try (SparkRDDWriteClient client = getHoodieWriteClient(hoodieWriteConfig)) {
@@ -560,7 +561,7 @@ void rollbackWithAsyncServices_commitStartsAndFinishesDuringCompaction(HoodieTab
560561
// commit the inflight delta commit
561562
client.commit(inflightCommit, writeStatus);
562563
// commit the compaction instant after the delta commit
563-
client.commitCompaction(compactionInstant.get(), result, Option.empty());
564+
client.commitCompaction(compactionInstant.get(), (HoodieCommitMetadata) result.getCommitMetadata().get(), Option.empty());
564565

565566
client.savepoint(inflightCommit, "user1", "Savepoint for commit that completed during compaction");
566567

@@ -581,7 +582,7 @@ void rollbackWithAsyncServices_commitStartsAndFinishesDuringCompaction(HoodieTab
581582
}
582583

583584
@ParameterizedTest
584-
@EnumSource(value = HoodieTableVersion.class, names = {"SIX", "NINE"})
585+
@EnumSource(value = HoodieTableVersion.class, names = {"SIX", "EIGHT"})
585586
void testMissingFileDoesNotFallRestore(HoodieTableVersion tableVersion) throws Exception {
586587
HoodieWriteConfig hoodieWriteConfig = getHoodieWriteConfigAndInitializeTable(HoodieCompactionConfig.newBuilder()
587588
.withMaxNumDeltaCommitsBeforeCompaction(4)
@@ -657,7 +658,6 @@ private HoodieWriteConfig getHoodieWriteConfigWithCompactionAndConcurrencyContro
657658
.withAutoUpgradeVersion(false)
658659
.withWriteTableVersion(tableVersion.versionCode())
659660
.withMetadataConfig(HoodieMetadataConfig.newBuilder()
660-
.withStreamingWriteEnabled(tableVersion.greaterThanOrEquals(HoodieTableVersion.EIGHT))
661661
.build())
662662
.withProps(Collections.singletonMap(HoodieCompactionConfig.PARQUET_SMALL_FILE_LIMIT.key(), "0"))
663663
.build();
@@ -677,9 +677,9 @@ private void validateFilesMetadata(HoodieWriteConfig writeConfig) {
677677
HoodieTableFileSystemView metadataBasedView = (HoodieTableFileSystemView) FileSystemViewManager
678678
.createViewManager(context, writeConfig.getMetadataConfig(), viewStorageConfig, writeConfig.getCommonConfig(),
679679
(SerializableFunctionUnchecked<HoodieTableMetaClient, HoodieTableMetadata>) v1 ->
680-
metaClient.getTableFormat().getMetadataFactory().create(context, metaClient.getStorage(), writeConfig.getMetadataConfig(), writeConfig.getBasePath()))
680+
HoodieTableMetadata.create(context, metaClient.getStorage(), writeConfig.getMetadataConfig(), writeConfig.getBasePath()))
681681
.getFileSystemView(basePath);
682-
assertForFSVEquality(fileListingBasedView, metadataBasedView, true, Option.empty());
682+
assertForFSVEquality(fileListingBasedView, metadataBasedView, true);
683683
}
684684

685685
private String upsertBatch(SparkRDDWriteClient client, List<HoodieRecord> baseRecordsToUpdate) throws IOException {
@@ -698,7 +698,6 @@ private void compactWithoutCommit(String compactionInstantTime, HoodieTableVersi
698698
.withAutoUpgradeVersion(false)
699699
.withWriteTableVersion(tableVersion.versionCode())
700700
.withMetadataConfig(HoodieMetadataConfig.newBuilder()
701-
.withStreamingWriteEnabled(tableVersion.greaterThanOrEquals(HoodieTableVersion.EIGHT))
702701
.build())
703702
.build();
704703

@@ -726,7 +725,6 @@ private HoodieWriteConfig getHoodieWriteConfigAndInitializeTable(HoodieCompactio
726725
.withAutoUpgradeVersion(false)
727726
.withWriteTableVersion(tableVersion.versionCode())
728727
.withMetadataConfig(HoodieMetadataConfig.newBuilder()
729-
.withStreamingWriteEnabled(tableVersion.greaterThanOrEquals(HoodieTableVersion.EIGHT))
730728
.build())
731729
.build();
732730
initMetaClient(HoodieTableType.MERGE_ON_READ, hoodieWriteConfig.getProps());

0 commit comments

Comments (0)