Commit 072e6c7

[Test] Wait on master node for shard started (elastic#131172)
The shard-started update may not yet be visible on the master node if the wait happens on a data node. In that case, the DiskThresholdMonitor may use stale cluster state when releasing read-only blocks. This PR fixes it by waiting on the master node, which was the behaviour before elastic#129872.

Resolves: elastic#131146
Parent: 102cef8
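For illustration, a minimal sketch of the master-node wait pattern the fix adopts (based on the DiskThresholdMonitorIT diff below; awaitClusterState, indexName and newDataNodeId are the test's own helper and variables, and the predicate is evaluated against cluster states observed on the elected master rather than polled from a data node):

    // Sketch only: awaitClusterState(...) comes from the internal test infrastructure
    // and, per this PR, observes cluster state on the master node. indexName and
    // newDataNodeId are defined earlier in the test (see the diff below).
    awaitClusterState(state -> {
        // Look up the primary shard's routing entry in the observed state.
        final ShardRouting primaryShard = state.routingTable(ProjectId.DEFAULT)
            .index(indexName)
            .shard(0)
            .primaryShard();
        // Done once the primary has STARTED on the newly added data node.
        return primaryShard.state() == ShardRoutingState.STARTED
            && newDataNodeId.equals(primaryShard.currentNodeId());
    });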

2 files changed: +4 −17 lines changed


muted-tests.yml

Lines changed: 0 additions & 3 deletions
@@ -535,9 +535,6 @@ tests:
 - class: org.elasticsearch.test.rest.yaml.RcsCcsCommonYamlTestSuiteIT
   method: test {p0=field_caps/40_time_series/Get simple time series field caps}
   issue: https://github.com/elastic/elasticsearch/issues/131225
-- class: org.elasticsearch.cluster.routing.allocation.DiskThresholdMonitorIT
-  method: testFloodStageExceeded
-  issue: https://github.com/elastic/elasticsearch/issues/131146
 - class: org.elasticsearch.packaging.test.DockerTests
   method: test090SecurityCliPackaging
   issue: https://github.com/elastic/elasticsearch/issues/131107

server/src/internalClusterTest/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorIT.java

Lines changed: 4 additions & 14 deletions
@@ -13,6 +13,7 @@
 import org.elasticsearch.action.admin.cluster.node.stats.NodesStatsResponse;
 import org.elasticsearch.cluster.DiskUsageIntegTestCase;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.metadata.ProjectId;
 import org.elasticsearch.cluster.node.DiscoveryNodeRole;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
@@ -86,20 +87,9 @@ public void testFloodStageExceeded() throws Exception {
         // Verify that we can still move shards around even while blocked
         final String newDataNodeName = internalCluster().startDataOnlyNode();
         final String newDataNodeId = clusterAdmin().prepareNodesInfo(newDataNodeName).get().getNodes().get(0).getNode().getId();
-        assertBusy(() -> {
-            final ShardRouting primaryShard = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT)
-                .clear()
-                .setRoutingTable(true)
-                .setNodes(true)
-                .setIndices(indexName)
-                .get()
-                .getState()
-                .routingTable()
-                .index(indexName)
-                .shard(0)
-                .primaryShard();
-            assertThat(primaryShard.state(), equalTo(ShardRoutingState.STARTED));
-            assertThat(primaryShard.currentNodeId(), equalTo(newDataNodeId));
+        awaitClusterState(state -> {
+            final ShardRouting primaryShard = state.routingTable(ProjectId.DEFAULT).index(indexName).shard(0).primaryShard();
+            return primaryShard.state() == ShardRoutingState.STARTED && newDataNodeId.equals(primaryShard.currentNodeId());
         });

         // Verify that the block is removed once the shard migration is complete
