@@ -31,6 +31,8 @@
 import org.apache.druid.java.util.common.parsers.CloseableIterator;
 import org.apache.druid.metadata.LockFilterPolicy;
 import org.apache.druid.rpc.indexing.NoopOverlordClient;
+import org.apache.druid.segment.TestDataSource;
+import org.apache.druid.server.coordinator.ClusterCompactionConfig;
 import org.apache.druid.server.coordinator.CreateDataSegments;
 import org.apache.druid.server.coordinator.DruidCompactionConfig;
 import org.apache.druid.server.coordinator.InlineSchemaDataSourceCompactionConfig;
@@ -111,7 +113,80 @@ public void testSimulateClusterCompactionConfigUpdate()
     );
     Assert.assertEquals(
         Collections.singletonList(
-            Arrays.asList("wiki", Intervals.of("2013-01-10/P1D"), 10, 1_000_000_000L, 1, "skip offset from latest[P1D]")
+            List.of("wiki", Intervals.of("2013-01-10/P1D"), 10, 1_000_000_000L, 1, "skip offset from latest[P1D]")
+        ),
+        skippedTable.getRows()
+    );
+  }
+
+  @Test
+  public void testSimulate_withFixedIntervalOrderPolicy()
+  {
+    final TestSegmentsMetadataManager segmentsMetadataManager = new TestSegmentsMetadataManager();
+
+    // Add some segments to the timeline
+    final String dataSource = TestDataSource.WIKI;
+    final List<DataSegment> wikiSegments
+        = CreateDataSegments.ofDatasource(dataSource)
+                            .forIntervals(10, Granularities.DAY)
+                            .withNumPartitions(10)
+                            .startingAt("2013-01-01")
+                            .eachOfSizeInMb(100);
+    wikiSegments.forEach(segmentsMetadataManager::addSegment);
+
+    final FixedIntervalOrderPolicy policy = new FixedIntervalOrderPolicy(
+        List.of(
+            new FixedIntervalOrderPolicy.Candidate(dataSource, Intervals.of("2013-01-08/P1D")),
+            new FixedIntervalOrderPolicy.Candidate(dataSource, Intervals.of("2013-01-04/P1D"))
+        )
+    );
+    final CompactionSimulateResult simulateResult = simulator.simulateRunWithConfig(
+        DruidCompactionConfig
+            .empty()
+            .withClusterConfig(new ClusterCompactionConfig(null, null, policy, null, null))
+            .withDatasourceConfig(
+                InlineSchemaDataSourceCompactionConfig.builder().forDataSource(dataSource).build()
+            ),
+        segmentsMetadataManager.getRecentDataSourcesSnapshot(),
+        CompactionEngine.NATIVE
+    );
+
+    Assert.assertNotNull(simulateResult);
+
+    final Map<CompactionStatus.State, Table> compactionStates = simulateResult.getCompactionStates();
+    Assert.assertNotNull(compactionStates);
+
+    Assert.assertNull(compactionStates.get(CompactionStatus.State.COMPLETE));
+    Assert.assertNull(compactionStates.get(CompactionStatus.State.RUNNING));
+
+    final Table pendingTable = compactionStates.get(CompactionStatus.State.PENDING);
+    Assert.assertEquals(
+        List.of("dataSource", "interval", "numSegments", "bytes", "maxTaskSlots", "reasonToCompact"),
+        pendingTable.getColumnNames()
+    );
+    Assert.assertEquals(
+        List.of(
+            List.of("wiki", Intervals.of("2013-01-08/P1D"), 10, 1_000_000_000L, 1, "not compacted yet"),
+            List.of("wiki", Intervals.of("2013-01-04/P1D"), 10, 1_000_000_000L, 1, "not compacted yet")
+        ),
+        pendingTable.getRows()
+    );
+
+    final Table skippedTable = compactionStates.get(CompactionStatus.State.SKIPPED);
+    Assert.assertEquals(
+        List.of("dataSource", "interval", "numSegments", "bytes", "reasonToSkip"),
+        skippedTable.getColumnNames()
+    );
+    Assert.assertEquals(
+        List.of(
+            List.of("wiki", Intervals.of("2013-01-02/P1D"), 10, 1_000_000_000L, 1, "Rejected by search policy"),
+            List.of("wiki", Intervals.of("2013-01-03/P1D"), 10, 1_000_000_000L, 1, "Rejected by search policy"),
+            List.of("wiki", Intervals.of("2013-01-07/P1D"), 10, 1_000_000_000L, 1, "Rejected by search policy"),
+            List.of("wiki", Intervals.of("2013-01-05/P1D"), 10, 1_000_000_000L, 1, "Rejected by search policy"),
+            List.of("wiki", Intervals.of("2013-01-06/P1D"), 10, 1_000_000_000L, 1, "Rejected by search policy"),
+            List.of("wiki", Intervals.of("2013-01-01/P1D"), 10, 1_000_000_000L, 1, "Rejected by search policy"),
+            List.of("wiki", Intervals.of("2013-01-09/P1D"), 10, 1_000_000_000L, 1, "Rejected by search policy"),
+            List.of("wiki", Intervals.of("2013-01-10/P1D"), 10, 1_000_000_000L, 1, "skip offset from latest[P1D]")
         ),
         skippedTable.getRows()
     );
|