Skip to content

Commit a1d989c

Browse files
committed
Improvements based on review
1 parent 58a4994 commit a1d989c

File tree

1 file changed

+10
-10
lines changed

1 file changed

+10
-10
lines changed

x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -189,7 +189,7 @@ public void testUpdateDownsampleRound() throws Exception {
189189
List.of(
190190
new DataStreamLifecycle.DownsamplingRound(TimeValue.timeValueMillis(0), new DateHistogramInterval("5m")),
191191
// data stream lifecycle runs every 1 second, so by the time we forcemerge the backing index it would've been at
192-
// least 2 seconds since rollover. only the 10 seconds round should be executed.
192+
// least 2 seconds since rollover. Only the 10 seconds round should be executed.
193193
new DataStreamLifecycle.DownsamplingRound(TimeValue.timeValueMillis(10), new DateHistogramInterval("10m"))
194194
)
195195
)
@@ -240,7 +240,7 @@ public void testUpdateDownsampleRound() throws Exception {
240240

241241
// update the lifecycle so that it only has one round, for the same `after` parameter as before, but a different interval
242242
// the different interval should yield a different downsample index name so we expect the data stream lifecycle to get the previous
243-
// `10s` interval downsample index, downsample it to `20m` and replace it in the data stream instead of the `10s` one.
243+
// `10m` interval downsample index, downsample it to `20m` and replace it in the data stream instead of the `10m` one.
244244
DataStreamLifecycle updatedLifecycle = DataStreamLifecycle.dataLifecycleBuilder()
245245
.downsamplingRounds(
246246
List.of(new DataStreamLifecycle.DownsamplingRound(TimeValue.timeValueMillis(10), new DateHistogramInterval("20m")))
@@ -276,7 +276,7 @@ public void testUpdateDownsampleRound() throws Exception {
276276
* This test ensures that when we change the sampling method, the already downsampled indices will use the original sampling method,
277277
* while the raw data ones will be downsampled with the most recent configuration.
278278
* To achieve that, we set the following test:
279-
* 1. Create a data stream that is downsampled with the aggregate method.
279+
* 1. Create a data stream that is downsampled with the initial sampling method.
280280
* 2. Rollover and wait for the downsampling to occur
281281
* 3. Double the downsample interval (so it can downsample the first index as well) and change the sampling method.
282282
* 4. Rollover and wait for both indices to be downsampled with the new interval
@@ -299,7 +299,7 @@ public void testUpdateDownsampleSamplingMode() throws Exception {
299299
.buildTemplate();
300300

301301
// Start and end time there just to ease with testing, so DLM doesn't have to wait for the end_time to lapse
302-
// First backing index.
302+
// Creating the first backing index.
303303
setupTSDBDataStreamAndIngestDocs(
304304
dataStreamName,
305305
"1986-01-08T23:40:53.384Z",
@@ -310,7 +310,7 @@ public void testUpdateDownsampleSamplingMode() throws Exception {
310310
);
311311

312312
// before we roll over, we update the index template to have new start/end time boundaries
313-
// Second backing index
313+
// Creating the second backing index.
314314
putTSDBIndexTemplate(dataStreamName, "2022-01-08T23:40:53.384Z", "2023-01-08T23:40:53.384Z", lifecycle);
315315
RolloverResponse rolloverResponse = safeGet(client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)));
316316
assertTrue(rolloverResponse.isRolledOver());
@@ -319,16 +319,16 @@ public void testUpdateDownsampleSamplingMode() throws Exception {
319319
indexDocuments(dataStreamName, randomIntBetween(1, 1000), "2022-01-08T23:50:00");
320320

321321
// Ensure that the first backing index has been downsampled
322-
final var waitForInitialDownsampling = ClusterServiceUtils.addMasterTemporaryStateListener(clusterState -> {
322+
awaitClusterState(clusterState -> {
323323
final var dataStream = clusterState.metadata().getProject().dataStreams().get(dataStreamName);
324324
if (dataStream == null) {
325325
return false;
326326
}
327327
return dataStream.getIndices().size() > 1 && dataStream.getIndices().getFirst().getName().startsWith("downsample-");
328328
});
329-
safeAwait(waitForInitialDownsampling);
330-
331-
// update the lifecycle so that the sampling method is different.
329+
assertDownsamplingMethod(initialSamplingMethod, "downsample-5m-" + firstBackingIndex);
330+
// We change the sampling method, but also we double the downsampling interval. We expect the data stream lifecycle to get the
331+
// previous `5m` interval downsampled index, downsample it to `10m` and replace it in the data stream instead of the `5m` one.
332332
DataStreamLifecycle updatedLifecycle = DataStreamLifecycle.dataLifecycleBuilder()
333333
.downsamplingMethod(updatedSamplingMethod)
334334
.downsamplingRounds(
@@ -347,7 +347,7 @@ public void testUpdateDownsampleSamplingMode() throws Exception {
347347
)
348348
);
349349

350-
// Third backing index
350+
// We roll over one more time, so the second backing index will be eligible for downsampling
351351
putTSDBIndexTemplate(dataStreamName, null, null, lifecycle);
352352
rolloverResponse = safeGet(client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)));
353353
assertTrue(rolloverResponse.isRolledOver());

0 commit comments

Comments
 (0)