@@ -18,24 +18,32 @@

 package org.apache.hudi.table.upgrade;

+import org.apache.hudi.client.SparkRDDWriteClient;
+import org.apache.hudi.client.WriteClientTestUtils;
+import org.apache.hudi.common.config.HoodieMetadataConfig;
 import org.apache.hudi.common.config.RecordMergeMode;
 import org.apache.hudi.common.model.HoodieIndexMetadata;
+import org.apache.hudi.common.model.HoodieRecord;
+import org.apache.hudi.common.model.HoodieTableType;
 import org.apache.hudi.common.table.HoodieTableConfig;
 import org.apache.hudi.common.table.HoodieTableMetaClient;
 import org.apache.hudi.common.table.HoodieTableVersion;
 import org.apache.hudi.common.table.timeline.HoodieInstant;
 import org.apache.hudi.common.table.timeline.HoodieTimeline;
 import org.apache.hudi.common.table.timeline.InstantFileNameGenerator;
 import org.apache.hudi.common.table.timeline.versioning.TimelineLayoutVersion;
+import org.apache.hudi.common.testutils.HoodieTestDataGenerator;
 import org.apache.hudi.common.testutils.HoodieTestUtils;
 import org.apache.hudi.common.util.Option;
 import org.apache.hudi.config.HoodieWriteConfig;
 import org.apache.hudi.exception.HoodieUpgradeDowngradeException;
 import org.apache.hudi.keygen.constant.KeyGeneratorType;
 import org.apache.hudi.metadata.HoodieTableMetadata;
+import org.apache.hudi.metadata.MetadataPartitionType;
 import org.apache.hudi.storage.StoragePath;
 import org.apache.hudi.testutils.SparkClientFunctionalTestHarness;

+import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Row;
 import org.junit.jupiter.api.Disabled;
@@ -49,6 +57,7 @@
 import org.slf4j.LoggerFactory;

 import java.io.IOException;
+import java.net.URI;
 import java.util.Arrays;
 import java.util.HashSet;
 import java.util.List;
@@ -57,6 +66,7 @@
 import java.util.stream.Collectors;
 import java.util.stream.Stream;

+import static org.apache.hudi.common.testutils.HoodieTestDataGenerator.getCommitTimeAtUTC;
 import static org.apache.hudi.keygen.KeyGenUtils.getComplexKeygenErrorMessage;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -98,31 +108,31 @@ public void testUpgradeOrDowngrade(HoodieTableVersion fromVersion, HoodieTableVe
     boolean isUpgrade = fromVersion.lesserThan(toVersion);
     String operation = isUpgrade ? "upgrade" : "downgrade";
     LOG.info("Testing {} from version {} to {}", operation, fromVersion, toVersion);
-
+
     HoodieTableMetaClient originalMetaClient = loadFixtureTable(fromVersion, suffix);
     assertEquals(fromVersion, originalMetaClient.getTableConfig().getTableVersion(),
         "Fixture table should be at expected version");
-
+
     HoodieWriteConfig config = createWriteConfig(originalMetaClient, true);
-
+
     int initialPendingCommits = originalMetaClient.getCommitsTimeline().filterPendingExcludingCompaction().countInstants();
     int initialCompletedCommits = originalMetaClient.getCommitsTimeline().filterCompletedInstants().countInstants();
-
+
     Dataset<Row> originalData = readTableData(originalMetaClient, "before " + operation);
-
+
     // Confirm that there are log files before rollback and compaction operations
     if (isRollbackAndCompactTransition(fromVersion, toVersion)) {
       validateLogFilesCount(originalMetaClient, operation, suffix.equals("-mor"));
     }
-
+
     new UpgradeDowngrade(originalMetaClient, config, context(), SparkUpgradeDowngradeHelper.getInstance())
         .run(toVersion, null);
-
+
     HoodieTableMetaClient resultMetaClient = HoodieTableMetaClient.builder()
         .setConf(storageConf().newInstance())
         .setBasePath(originalMetaClient.getBasePath())
         .build();
-
+
     assertTableVersionOnDataAndMetadataTable(resultMetaClient, toVersion);
     validateVersionSpecificProperties(resultMetaClient, toVersion);
     validateDataConsistency(originalData, resultMetaClient, "after " + operation);
@@ -376,6 +386,71 @@ public void testComplexKeygenValidationDuringUpgradeDowngrade(HoodieTableVersion
     }
   }

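+  /**
+   * Verifies that the record index partition of the metadata table is retained when a table
+   * created at version nine is downgraded to version eight, both with the metadata table
+   * enabled and with it disabled in the downgrade config.
+   */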
+  @ParameterizedTest
+  @MethodSource("testMdtValidationDowngrade")
+  public void testMdtPartitionNotDroppedWhenDowngradedFromTableVersionNine(HoodieTableType tableType, boolean mdtEnabled) throws Exception {
+    HoodieTableVersion fromVersion = HoodieTableVersion.NINE;
+    HoodieTableVersion toVersion = HoodieTableVersion.EIGHT;
+
+    Properties props = new Properties();
+    props.put(HoodieTableConfig.TYPE.key(), tableType.name());
+    HoodieTableMetaClient metaClient =
+        getHoodieMetaClient(storageConf(), URI.create(basePath()).getPath(), props);
+
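+    // Create the table at version nine with the record index enabled in the metadata table.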
+    HoodieWriteConfig writeConfig = getConfigBuilder(true)
+        .withPath(metaClient.getBasePath())
+        .withWriteTableVersion(fromVersion.versionCode())
+        .withMetadataConfig(HoodieMetadataConfig.newBuilder()
+            .withEnableRecordIndex(true).build())
+        .withProps(props)
+        .build();
+
+    SparkRDDWriteClient writeClient = new SparkRDDWriteClient(context(), writeConfig);
+    String partitionPath = "2021/09/11";
+    HoodieTestDataGenerator dataGenerator = new HoodieTestDataGenerator(new String[]{partitionPath});
+
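+    // Write one batch of inserts so the record index partition is actually built.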
+    String instant1 = getCommitTimeAtUTC(1);
+    List<HoodieRecord> records = dataGenerator.generateInserts(instant1, 100);
+    JavaRDD<HoodieRecord> dataset = jsc().parallelize(records, 2);
+
+    WriteClientTestUtils.startCommitWithTime(writeClient, instant1);
+    writeClient.commit(instant1, writeClient.insert(dataset, instant1));
+    metaClient.reloadTableConfig();
+
+    // verify record index partition exists before downgrade
+    assertTrue(metaClient.getTableConfig().getMetadataPartitions().contains(MetadataPartitionType.RECORD_INDEX.getPartitionPath()));
+
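+    // Build the downgrade config: either MDT on with record index off, or MDT fully off.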
+    HoodieWriteConfig.Builder downgradeWriteConfig = HoodieWriteConfig.newBuilder()
+        .withPath(metaClient.getBasePath())
+        .withProps(props);
+    if (mdtEnabled) {
+      downgradeWriteConfig.withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(true).withEnableRecordIndex(false).build());
+    } else {
+      downgradeWriteConfig.withMetadataConfig(HoodieMetadataConfig.newBuilder().enable(false).build());
+    }
+
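+    // Downgrade the table from version nine to eight.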
+    new UpgradeDowngrade(metaClient, downgradeWriteConfig.build(), context(), SparkUpgradeDowngradeHelper.getInstance())
+        .run(toVersion, null);
+
+    HoodieTableMetaClient resultMetaClient = HoodieTableMetaClient.builder()
+        .setConf(storageConf().newInstance())
+        .setBasePath(metaClient.getBasePath())
+        .build();
+
+    resultMetaClient.reloadTableConfig();
+    // verify record index partition still exists after downgrade
+    assertTrue(resultMetaClient.getTableConfig().getMetadataPartitions().contains(MetadataPartitionType.RECORD_INDEX.getPartitionPath()));
+  }
+
   /**
    * Load a fixture table from resources and copy it to a temporary location for testing.
    */
@@ -536,6 +602,16 @@ private static Stream<Arguments> testArgsUpgradeDowngrade() {
     );
   }

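+  // Table type and MDT-enabled combinations for testMdtPartitionNotDroppedWhenDowngradedFromTableVersionNine.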
+  private static Stream<Arguments> testMdtValidationDowngrade() {
+    return Stream.of(
+        Arguments.of(HoodieTableType.COPY_ON_WRITE, true),
+        Arguments.of(HoodieTableType.COPY_ON_WRITE, false),
+        Arguments.of(HoodieTableType.MERGE_ON_READ, true),
+        Arguments.of(HoodieTableType.MERGE_ON_READ, false)
+    );
+  }
+
   private static Stream<Arguments> testArgsPayloadUpgradeDowngrade() {
     String[] payloadTypes = {
         "default", "overwrite", "partial", "postgres", "mysql",