
Commit 64bc640

fixing DataStreamLifecycleServiceIT
1 parent d31b768

File tree

1 file changed (+5, -11 lines)

modules/data-streams/src/internalClusterTest/java/org/elasticsearch/datastreams/lifecycle/DataStreamLifecycleServiceIT.java

Lines changed: 5 additions & 11 deletions
@@ -104,6 +104,7 @@
 import static org.elasticsearch.index.IndexSettings.LIFECYCLE_ORIGINATION_DATE;
 import static org.elasticsearch.indices.ShardLimitValidator.SETTING_CLUSTER_MAX_SHARDS_PER_NODE;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.hamcrest.Matchers.contains;
 import static org.hamcrest.Matchers.containsInAnyOrder;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -785,14 +786,10 @@ public void testErrorRecordingOnRetention() throws Exception {
 ).get();
 DataStreamLifecycleHealthInfo dslHealthInfoOnHealthNode = healthNodeResponse.getHealthInfo().dslHealthInfo();
 assertThat(dslHealthInfoOnHealthNode, is(not(DataStreamLifecycleHealthInfo.NO_DSL_ERRORS)));
-// perhaps surprisingly rollover and delete are error-ing due to the read_only block on the first generation
-// index which prevents metadata updates so rolling over the data stream is also blocked (note that both indices error at
-// the same time so they'll have an equal retry count - the order becomes of the results, usually ordered by retry count,
-// becomes non deterministic, hence the dynamic matching of index name)
-assertThat(dslHealthInfoOnHealthNode.dslErrorsInfo().size(), is(2));
+assertThat(dslHealthInfoOnHealthNode.dslErrorsInfo().size(), is(1));
 DslErrorInfo errorInfo = dslHealthInfoOnHealthNode.dslErrorsInfo().get(0);
 assertThat(errorInfo.retryCount(), greaterThanOrEqualTo(3));
-assertThat(List.of(firstGenerationIndex, secondGenerationIndex).contains(errorInfo.indexName()), is(true));
+assertThat(errorInfo.indexName(), equalTo(firstGenerationIndex));
 });

 GetHealthAction.Response healthResponse = client().execute(GetHealthAction.INSTANCE, new GetHealthAction.Request(true, 1000))
@@ -808,15 +805,12 @@ public void testErrorRecordingOnRetention() throws Exception {
 assertThat(dslIndicator.impacts(), is(STAGNATING_INDEX_IMPACT));
 assertThat(
     dslIndicator.symptom(),
-    is("2 backing indices have repeatedly encountered errors whilst trying to advance in its lifecycle")
+    is("A backing index has repeatedly encountered errors whilst trying to advance in its lifecycle")
 );

 Diagnosis diagnosis = dslIndicator.diagnosisList().get(0);
 assertThat(diagnosis.definition(), is(STAGNATING_BACKING_INDICES_DIAGNOSIS_DEF));
-assertThat(
-    diagnosis.affectedResources().get(0).getValues(),
-    containsInAnyOrder(firstGenerationIndex, secondGenerationIndex)
-);
+assertThat(diagnosis.affectedResources().get(0).getValues(), contains(firstGenerationIndex));
 }

 // let's mark the index as writeable and make sure it's deleted and the error store is empty
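Note on the matcher change: the commit adds an import for org.hamcrest.Matchers.contains and now asserts that the single reported error belongs to the first-generation index, where the test previously accepted either generation via containsInAnyOrder. The following is a minimal sketch (not part of the commit) of how the two Hamcrest matchers differ; the index names used are hypothetical placeholders.

// Minimal illustration of Hamcrest contains vs containsInAnyOrder.
// The index names below are hypothetical, not taken from the test.
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsInAnyOrder;

import java.util.List;

public class HamcrestContainsSketch {
    public static void main(String[] args) {
        String firstGenerationIndex = ".ds-example-000001";  // hypothetical
        String secondGenerationIndex = ".ds-example-000002"; // hypothetical

        // containsInAnyOrder: the iterable must hold exactly these items, order ignored.
        assertThat(List.of(secondGenerationIndex, firstGenerationIndex),
            containsInAnyOrder(firstGenerationIndex, secondGenerationIndex));

        // contains: the iterable must hold exactly these items, in this order;
        // with a single argument it asserts a one-element collection with that value.
        assertThat(List.of(firstGenerationIndex), contains(firstGenerationIndex));
    }
}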
