Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 0 additions & 9 deletions muted-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -566,15 +566,6 @@ tests:
- class: org.elasticsearch.index.mapper.vectors.DenseVectorFieldIndexTypeUpdateIT
method: testDenseVectorMappingUpdate {initialType=bbq_flat updateType=bbq_disk}
issue: https://github.com/elastic/elasticsearch/issues/132134
- class: org.elasticsearch.upgrades.RunningSnapshotIT
method: testRunningSnapshotCompleteAfterUpgrade {upgradedNodes=1}
issue: https://github.com/elastic/elasticsearch/issues/132135
- class: org.elasticsearch.upgrades.RunningSnapshotIT
method: testRunningSnapshotCompleteAfterUpgrade {upgradedNodes=2}
issue: https://github.com/elastic/elasticsearch/issues/132136
- class: org.elasticsearch.upgrades.RunningSnapshotIT
method: testRunningSnapshotCompleteAfterUpgrade {upgradedNodes=3}
issue: https://github.com/elastic/elasticsearch/issues/132137
- class: org.elasticsearch.index.mapper.vectors.DenseVectorFieldIndexTypeUpdateIT
method: testDenseVectorMappingUpdate {initialType=int4_flat updateType=int4_hnsw}
issue: https://github.com/elastic/elasticsearch/issues/132140
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -12,12 +12,10 @@
import com.carrotsearch.randomizedtesting.annotations.Name;

import org.elasticsearch.client.Request;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.rest.ObjectPath;

import java.io.IOException;
import java.util.Collection;
import java.util.Map;
import java.util.stream.Collectors;

Expand All @@ -26,7 +24,6 @@
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.not;

public class RunningSnapshotIT extends AbstractRollingUpgradeTestCase {
Expand All @@ -45,6 +42,13 @@ public void testRunningSnapshotCompleteAfterUpgrade() throws Exception {
.collect(Collectors.toUnmodifiableMap(Map.Entry::getKey, entry -> entry.getValue().get("name").toString()));
assertThat(nodeIdToNodeNames.values(), containsInAnyOrder("test-cluster-0", "test-cluster-1", "test-cluster-2"));

final var lastUpgradeNodeId = nodeIdToNodeNames.entrySet()
.stream()
.filter(entry -> "test-cluster-2".equals(entry.getValue()))
.map(Map.Entry::getKey)
.findFirst()
.orElseThrow(() -> new AssertionError("node id not found in " + nodeIdToNodeNames));

if (isOldCluster()) {
registerRepository(repositoryName, "fs", randomBoolean(), Settings.builder().put("location", "backup").build());
// create an index to have one shard per node
Expand All @@ -54,54 +58,41 @@ public void testRunningSnapshotCompleteAfterUpgrade() throws Exception {
indexDocs(indexName, between(10, 50));
}
flush(indexName, true);
// Signal shutdown to prevent snapshot from being completed
putShutdownMetadata(nodeIdToNodeNames.keySet());
// Signal shutdown for the last node to upgrade to prevent snapshot from being completed during the upgrade process
putShutdownMetadata(lastUpgradeNodeId);
createSnapshot(repositoryName, snapshotName, false);
assertRunningSnapshot(repositoryName, snapshotName);
} else {
if (isUpgradedCluster()) {
deleteShutdownMetadata(nodeIdToNodeNames.keySet());
assertNoShutdownMetadata(nodeIdToNodeNames.keySet());
deleteShutdownMetadata(lastUpgradeNodeId);
assertNoShutdownMetadata(lastUpgradeNodeId);
ensureGreen(indexName);
assertBusy(() -> assertCompletedSnapshot(repositoryName, snapshotName));
} else {
if (isFirstMixedCluster()) {
final var upgradedNodeIds = nodeIdToNodeNames.entrySet()
.stream()
.filter(entry -> "test-cluster-0".equals(entry.getValue()))
.map(Map.Entry::getKey)
.collect(Collectors.toUnmodifiableSet());
assertThat(upgradedNodeIds, hasSize(1));
deleteShutdownMetadata(upgradedNodeIds);
}
assertRunningSnapshot(repositoryName, snapshotName);
}
}
}

private void putShutdownMetadata(Collection<String> nodeIds) throws IOException {
for (String nodeId : nodeIds) {
final Request putShutdownRequest = new Request("PUT", "/_nodes/" + nodeId + "/shutdown");
putShutdownRequest.setJsonEntity("""
{
"type": "remove",
"reason": "test"
}""");
client().performRequest(putShutdownRequest);
}
/**
 * Registers a {@code remove}-type shutdown marker for the given node via the node
 * shutdown API, which keeps an in-progress snapshot from completing on that node.
 *
 * @param nodeId id of the node to mark as shutting down
 * @throws IOException if the shutdown request fails
 */
private void putShutdownMetadata(String nodeId) throws IOException {
    final var request = new Request("PUT", "/_nodes/" + nodeId + "/shutdown");
    request.setJsonEntity("""
        {
            "type": "remove",
            "reason": "test"
        }""");
    client().performRequest(request);
}

private void deleteShutdownMetadata(Collection<String> nodeIds) throws IOException {
for (String nodeId : nodeIds) {
final Request request = new Request("DELETE", "/_nodes/" + nodeId + "/shutdown");
request.addParameter(IGNORE_RESPONSE_CODES_PARAM, "404");
client().performRequest(request);
}
/**
 * Removes any shutdown marker previously registered for the given node.
 * A 404 response (no marker present) is tolerated so the call is idempotent.
 *
 * @param nodeId id of the node whose shutdown marker is deleted
 * @throws IOException if the delete request fails
 */
private void deleteShutdownMetadata(String nodeId) throws IOException {
    final var deleteRequest = new Request("DELETE", "/_nodes/" + nodeId + "/shutdown");
    // don't fail when there was nothing to delete
    deleteRequest.addParameter(IGNORE_RESPONSE_CODES_PARAM, "404");
    client().performRequest(deleteRequest);
}

private void assertNoShutdownMetadata(Collection<String> nodeIds) throws IOException {
private void assertNoShutdownMetadata(String nodeId) throws IOException {
final ObjectPath responsePath = assertOKAndCreateObjectPath(
client().performRequest(new Request("GET", "/_nodes/" + Strings.collectionToCommaDelimitedString(nodeIds) + "/shutdown"))
client().performRequest(new Request("GET", "/_nodes/" + nodeId + "/shutdown"))
);
assertThat(responsePath.evaluate("nodes"), empty());
}
Expand Down