 import org.apache.hudi.common.table.view.FileSystemViewStorageType;
 import org.apache.hudi.common.testutils.HoodieTestTable;
 import org.apache.hudi.common.testutils.HoodieTestUtils;
+import org.apache.hudi.common.testutils.InProcessTimeGenerator;
 import org.apache.hudi.common.util.CommitUtils;
 import org.apache.hudi.io.util.FileIOUtils;
 import org.apache.hudi.common.util.Option;
@@ -703,19 +704,19 @@ public void testMultiWriterWithInsertsToDistinctPartitions(HoodieTableType table

     // Create the first commit
     SparkRDDWriteClient<?> client = getHoodieWriteClient(cfg);
-    createCommitWithInsertsForPartition(cfg, client, "000", "001", 100, "2016/03/01");
+    String firstCommitTime = InProcessTimeGenerator.createNewInstantTime();
+    createCommitWithInsertsForPartition(cfg, client, "000", firstCommitTime, 100, "2016/03/01");
     client.close();
     int numConcurrentWriters = 5;
     ExecutorService executors = Executors.newFixedThreadPool(numConcurrentWriters);

     List<Future<?>> futures = new ArrayList<>(numConcurrentWriters);
     for (int loop = 0; loop < numConcurrentWriters; loop++) {
-      String newCommitTime = "00" + (loop + 2);
       String partition = "2016/03/0" + (loop + 2);
       futures.add(executors.submit(() -> {
         try {
           SparkRDDWriteClient<?> writeClient = getHoodieWriteClient(cfg);
-          createCommitWithInsertsForPartition(cfg, writeClient, "001", newCommitTime, 100, partition);
+          createCommitWithInsertsForPartition(cfg, writeClient, "001", InProcessTimeGenerator.createNewInstantTime(), 100, partition);
           writeClient.close();
         } catch (Exception e) {
           throw new RuntimeException(e);
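For context on the hunk above: the test previously hardcoded instant times ("001", "002", ...), which can collide or sort incorrectly once five writers commit concurrently, so each writer now requests a fresh instant time from InProcessTimeGenerator. Below is a minimal sketch of that usage, assuming only what the diff shows, namely that InProcessTimeGenerator.createNewInstantTime() returns a distinct instant-time string on each call within the process; the class name InstantTimeSketch and the printed output are illustrative, not part of the change.

    import org.apache.hudi.common.testutils.InProcessTimeGenerator;

    // Illustrative only: shows why generated instant times are safe for the
    // concurrent writers in the test, where hardcoded "002".."006" were not.
    public class InstantTimeSketch {
      public static void main(String[] args) {
        String first = InProcessTimeGenerator.createNewInstantTime();
        String second = InProcessTimeGenerator.createNewInstantTime();
        // Assumption: each call yields a distinct instant-time string, so no
        // two writers can race to create the same instant on the timeline.
        System.out.println(first + " != " + second);
      }
    }

The same reasoning explains why the loop no longer computes newCommitTime from the loop index: each submitted task generates its own instant time at execution, rather than being assigned one up front.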