Skip to content

Commit 1d8d1d2

Browse files
authored
[8.17] Change the handling of passthrough dimensions (#127752) (#127890) (#127905)
* Change the handling of passthrough dimensions (#127752) (#127890) When downsampling an index that has a mapping with passthrough dimensions, the downsampling process identifies the wrapper object as a dimension and fails when it tries to retrieve the type. We did some prework to establish a shared framework in the internalClusterTest. For now it only includes helpers for setting up time series data streams and a limited assertion helper for dimensions and metrics. This allows us to set up an internalClusterTest that captures this issue during downsampling in #125156. To fix this we refine the check that determines whether a field is a dimension, so that it skips the wrapper field. Fixes #125156. * Adjust the test to match this version of the code
1 parent 7cd213c commit 1d8d1d2

File tree

9 files changed

+481
-343
lines changed

9 files changed

+481
-343
lines changed

docs/changelog/127752.yaml

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,6 @@
1+
pr: 127752
2+
summary: Downsampling does not consider passthrough fields as dimensions
3+
area: Downsampling
4+
type: bug
5+
issues:
6+
- 125156

x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleDisruptionIT.java

Lines changed: 5 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -19,35 +19,23 @@
1919
import org.elasticsearch.cluster.metadata.IndexMetadata;
2020
import org.elasticsearch.common.settings.Settings;
2121
import org.elasticsearch.core.TimeValue;
22-
import org.elasticsearch.datastreams.DataStreamsPlugin;
2322
import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService;
24-
import org.elasticsearch.plugins.Plugin;
2523
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
2624
import org.elasticsearch.test.ESIntegTestCase;
2725
import org.elasticsearch.test.InternalTestCluster;
2826
import org.elasticsearch.test.junit.annotations.TestLogging;
29-
import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin;
30-
import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
3127

32-
import java.util.Collection;
3328
import java.util.List;
3429
import java.util.concurrent.TimeUnit;
3530

36-
import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.getBackingIndices;
37-
import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.putTSDBIndexTemplate;
3831
import static org.hamcrest.Matchers.is;
3932
import static org.hamcrest.Matchers.notNullValue;
4033

4134
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0, numClientNodes = 4)
42-
public class DataStreamLifecycleDownsampleDisruptionIT extends ESIntegTestCase {
35+
public class DataStreamLifecycleDownsampleDisruptionIT extends DownsamplingIntegTestCase {
4336
private static final Logger logger = LogManager.getLogger(DataStreamLifecycleDownsampleDisruptionIT.class);
4437
public static final int DOC_COUNT = 50_000;
4538

46-
@Override
47-
protected Collection<Class<? extends Plugin>> nodePlugins() {
48-
return List.of(DataStreamsPlugin.class, LocalStateCompositeXPackPlugin.class, Downsample.class, AggregateMetricMapperPlugin.class);
49-
}
50-
5139
@Override
5240
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
5341
Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings));
@@ -76,8 +64,7 @@ public void testDataStreamLifecycleDownsampleRollingRestart() throws Exception {
7664
)
7765
)
7866
.build();
79-
DataStreamLifecycleDriver.setupTSDBDataStreamAndIngestDocs(
80-
client(),
67+
setupTSDBDataStreamAndIngestDocs(
8168
dataStreamName,
8269
"1986-01-08T23:40:53.384Z",
8370
"2022-01-08T23:40:53.384Z",
@@ -88,15 +75,15 @@ public void testDataStreamLifecycleDownsampleRollingRestart() throws Exception {
8875

8976
// before we rollover we update the index template to remove the start/end time boundaries (they're there just to ease with
9077
// testing so DSL doesn't have to wait for the end_time to lapse)
91-
putTSDBIndexTemplate(client(), dataStreamName, null, null, lifecycle);
92-
client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet();
78+
putTSDBIndexTemplate(dataStreamName, null, null, lifecycle);
79+
safeGet(client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)));
9380

9481
// DSL runs every second and it has to tail forcemerge the index (2 seconds) and mark it as read-only (2s) before it starts
9582
// downsampling. This sleep here tries to get as close as possible to having disruption during the downsample execution.
9683
long sleepTime = randomLongBetween(3000, 4500);
9784
logger.info("-> giving data stream lifecycle [{}] millis to make some progress before starting the disruption", sleepTime);
9885
Thread.sleep(sleepTime);
99-
List<String> backingIndices = getBackingIndices(client(), dataStreamName);
86+
List<String> backingIndices = getDataStreamBackingIndexNames(dataStreamName);
10087
// first generation index
10188
String sourceIndex = backingIndices.get(0);
10289

x-pack/plugin/downsample/src/internalClusterTest/java/org/elasticsearch/xpack/downsample/DataStreamLifecycleDownsampleIT.java

Lines changed: 26 additions & 40 deletions
Original file line numberDiff line numberDiff line change
@@ -15,35 +15,23 @@
1515
import org.elasticsearch.cluster.metadata.DataStreamLifecycle.Downsampling;
1616
import org.elasticsearch.common.settings.Settings;
1717
import org.elasticsearch.core.TimeValue;
18-
import org.elasticsearch.datastreams.DataStreamsPlugin;
1918
import org.elasticsearch.datastreams.lifecycle.DataStreamLifecycleService;
20-
import org.elasticsearch.plugins.Plugin;
2119
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval;
22-
import org.elasticsearch.test.ESIntegTestCase;
2320
import org.elasticsearch.test.junit.annotations.TestLogging;
24-
import org.elasticsearch.xpack.aggregatemetric.AggregateMetricMapperPlugin;
25-
import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
2621

27-
import java.util.Collection;
2822
import java.util.HashSet;
2923
import java.util.List;
3024
import java.util.Set;
3125
import java.util.concurrent.TimeUnit;
3226

3327
import static org.elasticsearch.cluster.metadata.ClusterChangedEventUtils.indicesCreated;
3428
import static org.elasticsearch.cluster.metadata.DataStreamTestHelper.backingIndexEqualTo;
35-
import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.getBackingIndices;
36-
import static org.elasticsearch.xpack.downsample.DataStreamLifecycleDriver.putTSDBIndexTemplate;
29+
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
3730
import static org.hamcrest.Matchers.is;
3831

39-
public class DataStreamLifecycleDownsampleIT extends ESIntegTestCase {
32+
public class DataStreamLifecycleDownsampleIT extends DownsamplingIntegTestCase {
4033
public static final int DOC_COUNT = 50_000;
4134

42-
@Override
43-
protected Collection<Class<? extends Plugin>> nodePlugins() {
44-
return List.of(DataStreamsPlugin.class, LocalStateCompositeXPackPlugin.class, Downsample.class, AggregateMetricMapperPlugin.class);
45-
}
46-
4735
@Override
4836
protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) {
4937
Settings.Builder settings = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings));
@@ -66,8 +54,7 @@ public void testDownsampling() throws Exception {
6654
)
6755
.build();
6856

69-
DataStreamLifecycleDriver.setupTSDBDataStreamAndIngestDocs(
70-
client(),
57+
setupTSDBDataStreamAndIngestDocs(
7158
dataStreamName,
7259
"1986-01-08T23:40:53.384Z",
7360
"2022-01-08T23:40:53.384Z",
@@ -76,7 +63,7 @@ public void testDownsampling() throws Exception {
7663
"1990-09-09T18:00:00"
7764
);
7865

79-
List<String> backingIndices = getBackingIndices(client(), dataStreamName);
66+
List<String> backingIndices = getDataStreamBackingIndexNames(dataStreamName);
8067
String firstGenerationBackingIndex = backingIndices.get(0);
8168
String oneSecondDownsampleIndex = "downsample-5m-" + firstGenerationBackingIndex;
8269
String tenSecondsDownsampleIndex = "downsample-10m-" + firstGenerationBackingIndex;
@@ -93,7 +80,7 @@ public void testDownsampling() throws Exception {
9380
});
9481
// before we rollover we update the index template to remove the start/end time boundaries (they're there just to ease with
9582
// testing so DSL doesn't have to wait for the end_time to lapse)
96-
putTSDBIndexTemplate(client(), dataStreamName, null, null, lifecycle);
83+
putTSDBIndexTemplate(dataStreamName, null, null, lifecycle);
9784

9885
client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet();
9986

@@ -109,7 +96,7 @@ public void testDownsampling() throws Exception {
10996
}, 30, TimeUnit.SECONDS);
11097

11198
assertBusy(() -> {
112-
List<String> dsBackingIndices = getBackingIndices(client(), dataStreamName);
99+
List<String> dsBackingIndices = getDataStreamBackingIndexNames(dataStreamName);
113100

114101
assertThat(dsBackingIndices.size(), is(2));
115102
String writeIndex = dsBackingIndices.get(1);
@@ -136,8 +123,7 @@ public void testDownsamplingOnlyExecutesTheLastMatchingRound() throws Exception
136123
)
137124
)
138125
.build();
139-
DataStreamLifecycleDriver.setupTSDBDataStreamAndIngestDocs(
140-
client(),
126+
setupTSDBDataStreamAndIngestDocs(
141127
dataStreamName,
142128
"1986-01-08T23:40:53.384Z",
143129
"2022-01-08T23:40:53.384Z",
@@ -146,7 +132,7 @@ public void testDownsamplingOnlyExecutesTheLastMatchingRound() throws Exception
146132
"1990-09-09T18:00:00"
147133
);
148134

149-
List<String> backingIndices = getBackingIndices(client(), dataStreamName);
135+
List<String> backingIndices = getDataStreamBackingIndexNames(dataStreamName);
150136
String firstGenerationBackingIndex = backingIndices.get(0);
151137
String oneSecondDownsampleIndex = "downsample-5m-" + firstGenerationBackingIndex;
152138
String tenSecondsDownsampleIndex = "downsample-10m-" + firstGenerationBackingIndex;
@@ -163,7 +149,7 @@ public void testDownsamplingOnlyExecutesTheLastMatchingRound() throws Exception
163149
});
164150
// before we rollover we update the index template to remove the start/end time boundaries (they're there just to ease with
165151
// testing so DSL doesn't have to wait for the end_time to lapse)
166-
putTSDBIndexTemplate(client(), dataStreamName, null, null, lifecycle);
152+
putTSDBIndexTemplate(dataStreamName, null, null, lifecycle);
167153
client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet();
168154

169155
assertBusy(() -> {
@@ -173,7 +159,7 @@ public void testDownsamplingOnlyExecutesTheLastMatchingRound() throws Exception
173159
}, 30, TimeUnit.SECONDS);
174160

175161
assertBusy(() -> {
176-
List<String> dsBackingIndices = getBackingIndices(client(), dataStreamName);
162+
List<String> dsBackingIndices = getDataStreamBackingIndexNames(dataStreamName);
177163

178164
assertThat(dsBackingIndices.size(), is(2));
179165
String writeIndex = dsBackingIndices.get(1);
@@ -201,8 +187,7 @@ public void testUpdateDownsampleRound() throws Exception {
201187
)
202188
.build();
203189

204-
DataStreamLifecycleDriver.setupTSDBDataStreamAndIngestDocs(
205-
client(),
190+
setupTSDBDataStreamAndIngestDocs(
206191
dataStreamName,
207192
"1986-01-08T23:40:53.384Z",
208193
"2022-01-08T23:40:53.384Z",
@@ -211,7 +196,7 @@ public void testUpdateDownsampleRound() throws Exception {
211196
"1990-09-09T18:00:00"
212197
);
213198

214-
List<String> backingIndices = getBackingIndices(client(), dataStreamName);
199+
List<String> backingIndices = getDataStreamBackingIndexNames(dataStreamName);
215200
String firstGenerationBackingIndex = backingIndices.get(0);
216201
String oneSecondDownsampleIndex = "downsample-5m-" + firstGenerationBackingIndex;
217202
String tenSecondsDownsampleIndex = "downsample-10m-" + firstGenerationBackingIndex;
@@ -228,8 +213,8 @@ public void testUpdateDownsampleRound() throws Exception {
228213
});
229214
// before we rollover we update the index template to remove the start/end time boundaries (they're there just to ease with
230215
// testing so DSL doesn't have to wait for the end_time to lapse)
231-
putTSDBIndexTemplate(client(), dataStreamName, null, null, lifecycle);
232-
client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)).actionGet();
216+
putTSDBIndexTemplate(dataStreamName, null, null, lifecycle);
217+
safeGet(client().execute(RolloverAction.INSTANCE, new RolloverRequest(dataStreamName, null)));
233218

234219
assertBusy(() -> {
235220
assertThat(witnessedDownsamplingIndices.size(), is(1));
@@ -238,7 +223,7 @@ public void testUpdateDownsampleRound() throws Exception {
238223
}, 30, TimeUnit.SECONDS);
239224

240225
assertBusy(() -> {
241-
List<String> dsBackingIndices = getBackingIndices(client(), dataStreamName);
226+
List<String> dsBackingIndices = getDataStreamBackingIndexNames(dataStreamName);
242227
assertThat(dsBackingIndices.size(), is(2));
243228
String writeIndex = dsBackingIndices.get(1);
244229
assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2));
@@ -247,22 +232,23 @@ public void testUpdateDownsampleRound() throws Exception {
247232

248233
// update the lifecycle so that it only has one round, for the same `after` parameter as before, but a different interval
249234
// the different interval should yield a different downsample index name so we expect the data stream lifecycle to get the previous
250-
// `10s` interval downsample index, downsample it to `30s` and replace it in the data stream instead of the `10s` one.
235+
// `10s` interval downsample index, downsample it to `20m` and replace it in the data stream instead of the `10s` one.
251236
DataStreamLifecycle updatedLifecycle = DataStreamLifecycle.newBuilder()
252237
.downsampling(
253238
new Downsampling(
254239
List.of(new Downsampling.Round(TimeValue.timeValueMillis(10), new DownsampleConfig(new DateHistogramInterval("20m"))))
255240
)
256241
)
257242
.build();
258-
259-
client().execute(
260-
PutDataStreamLifecycleAction.INSTANCE,
261-
new PutDataStreamLifecycleAction.Request(
262-
TEST_REQUEST_TIMEOUT,
263-
TEST_REQUEST_TIMEOUT,
264-
new String[] { dataStreamName },
265-
updatedLifecycle
243+
assertAcked(
244+
client().execute(
245+
PutDataStreamLifecycleAction.INSTANCE,
246+
new PutDataStreamLifecycleAction.Request(
247+
TEST_REQUEST_TIMEOUT,
248+
TEST_REQUEST_TIMEOUT,
249+
new String[] { dataStreamName },
250+
updatedLifecycle
251+
)
266252
)
267253
);
268254

@@ -271,7 +257,7 @@ public void testUpdateDownsampleRound() throws Exception {
271257
assertBusy(() -> {
272258
assertThat(indexExists(tenSecondsDownsampleIndex), is(false));
273259

274-
List<String> dsBackingIndices = getBackingIndices(client(), dataStreamName);
260+
List<String> dsBackingIndices = getDataStreamBackingIndexNames(dataStreamName);
275261
assertThat(dsBackingIndices.size(), is(2));
276262
String writeIndex = dsBackingIndices.get(1);
277263
assertThat(writeIndex, backingIndexEqualTo(dataStreamName, 2));

0 commit comments

Comments
 (0)