Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
cd9c2d3
Add min. read-only index version compatible to DiscoveryNode
tlrx Dec 16, 2024
6f2c230
Merge branch 'main' into 2024/12/16/add-min-readonly-index-compatible…
tlrx Dec 16, 2024
3088482
Merge branch 'main' into 2024/12/16/add-min-readonly-index-compatible…
tlrx Dec 16, 2024
2b4fa25
xcontent + tests
tlrx Dec 16, 2024
a3f4eeb
Merge branch 'main' into 2024/12/16/add-min-readonly-index-compatible…
tlrx Dec 16, 2024
e768dff
fix yaml test
tlrx Dec 16, 2024
80d491b
Merge branch 'main' into 2024/12/16/add-min-readonly-index-compatible…
tlrx Dec 16, 2024
d23319b
feedback
tlrx Dec 16, 2024
46f28c9
Allow searchable snapshots indices in version N-2 to join
tlrx Dec 16, 2024
ad95bca
Update docs/changelog/118785.yaml
tlrx Dec 16, 2024
1a2f9a0
Merge branch 'main' into 2024/12/16/add-min-readonly-index-compatible…
tlrx Dec 17, 2024
e9397bf
Merge branch 'main' into 2024/12/16/allow-searchable-snapshot-n-2
tlrx Dec 17, 2024
fd4c1ca
doc
tlrx Dec 17, 2024
85d604a
Merge branch 'main' into 2024/12/16/add-min-readonly-index-compatible…
tlrx Dec 17, 2024
ba8929a
fix test
tlrx Dec 17, 2024
a44fe1f
Merge branch 'main' into 2024/12/16/allow-searchable-snapshot-n-2
tlrx Dec 17, 2024
404a151
Merge branch 'main' into 2024/12/16/add-min-readonly-index-compatible…
tlrx Dec 17, 2024
b41f78a
Merge branch 'main' into 2024/12/16/add-min-readonly-index-compatible…
tlrx Dec 17, 2024
f7e8af4
Merge branch 'main' into 2024/12/16/add-min-readonly-index-compatible…
tlrx Dec 17, 2024
8b1ebcd
Merge branch 'main' into 2024/12/16/add-min-readonly-index-compatible…
tlrx Dec 17, 2024
6ce8c73
Merge branch '2024/12/16/add-min-readonly-index-compatible-version-to…
tlrx Dec 17, 2024
1f5083d
add archive
tlrx Dec 17, 2024
7dcc42f
Merge branch 'main' into 2024/12/16/allow-searchable-snapshot-n-2
tlrx Dec 18, 2024
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions docs/changelog/118785.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
pr: 118785
summary: "Allow searchable snapshot indices in version N-2 to be recovered on a V9 cluster"
area: Allocation
type: enhancement
issues: []
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
import com.carrotsearch.randomizedtesting.annotations.TestCaseOrdering;

import org.elasticsearch.client.Request;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.Strings;
import org.elasticsearch.test.cluster.ElasticsearchCluster;
import org.elasticsearch.test.cluster.local.LocalClusterConfigProvider;
import org.elasticsearch.test.cluster.local.distribution.DistributionType;
Expand All @@ -28,12 +30,15 @@

import java.util.Comparator;
import java.util.Locale;
import java.util.stream.IntStream;
import java.util.stream.Stream;

import static org.elasticsearch.test.cluster.util.Version.CURRENT;
import static org.elasticsearch.test.cluster.util.Version.fromString;
import static org.elasticsearch.test.rest.ObjectPath.createFromResponse;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;

/**
Expand Down Expand Up @@ -113,6 +118,12 @@ protected String suffix(String name) {
return name + '-' + getTestName().split(" ")[0].toLowerCase(Locale.ROOT);
}

protected Settings repositorySettings() {
    // Each test gets its own repository location, derived from the test name via suffix(),
    // so repositories registered by different tests in the suite never collide on disk.
    var repositoryLocation = REPOSITORY_PATH.getRoot().toPath().resolve(suffix("location")).toFile().getPath();
    return Settings.builder().put("location", repositoryLocation).build();
}

protected static Version clusterVersion() throws Exception {
var response = assertOK(client().performRequest(new Request("GET", "/")));
var responseBody = createFromResponse(response);
Expand All @@ -121,12 +132,24 @@ protected static Version clusterVersion() throws Exception {
return version;
}

protected static Version indexLuceneVersion(String indexName) throws Exception {
protected static Version indexVersion(String indexName) throws Exception {
    // Reads index.version.created from the index settings and decodes the numeric
    // version id (major * 1_000_000 + minor * 10_000 + revision * 100 + ...) into a Version.
    var settingsResponse = assertOK(client().performRequest(new Request("GET", "/" + indexName + "/_settings")));
    var versionId = Integer.parseInt(createFromResponse(settingsResponse).evaluate(indexName + ".settings.index.version.created"));
    var major = (byte) ((versionId / 1_000_000) % 100);
    var minor = (byte) ((versionId / 10_000) % 100);
    var revision = (byte) ((versionId / 100) % 100);
    return new Version(major, minor, revision);
}

protected static void indexDocs(String indexName, int numDocs) throws Exception {
    // Indexes numDocs trivial documents into the given index through a single _bulk request,
    // then fails the test if the bulk response reports any per-item errors.
    var body = new StringBuilder();
    for (int docId = 0; docId < numDocs; docId++) {
        body.append(Strings.format("""
            {"index":{"_id":"%s","_index":"%s"}}
            {"test":"test"}
            """, docId, indexName));
    }
    var bulkRequest = new Request("POST", "/_bulk");
    bulkRequest.setJsonEntity(body.toString());
    var bulkResponse = assertOK(client().performRequest(bulkRequest));
    assertThat(entityAsMap(bulkResponse).get("errors"), allOf(notNullValue(), is(false)));
}

/**
* Execute the test suite with the parameters provided by the {@link #parameters()} in version order.
*/
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -10,20 +10,18 @@
package org.elasticsearch.lucene;

import org.elasticsearch.client.Request;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.test.cluster.util.Version;

import java.util.stream.IntStream;

import static org.elasticsearch.test.rest.ObjectPath.createFromResponse;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;

public class LuceneCompatibilityIT extends AbstractLuceneIndexCompatibilityTestCase {

Expand All @@ -42,13 +40,7 @@ public void testRestoreIndex() throws Exception {
final int numDocs = 1234;

logger.debug("--> registering repository [{}]", repository);
registerRepository(
client(),
repository,
FsRepository.TYPE,
true,
Settings.builder().put("location", REPOSITORY_PATH.getRoot().getPath()).build()
);
registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings());

if (VERSION_MINUS_2.equals(clusterVersion())) {
logger.debug("--> creating index [{}]", index);
Expand All @@ -63,17 +55,7 @@ public void testRestoreIndex() throws Exception {
);

logger.debug("--> indexing [{}] docs in [{}]", numDocs, index);
final var bulks = new StringBuilder();
IntStream.range(0, numDocs).forEach(n -> bulks.append(Strings.format("""
{"index":{"_id":"%s","_index":"%s"}}
{"test":"test"}
""", n, index)));

var bulkRequest = new Request("POST", "/_bulk");
bulkRequest.setJsonEntity(bulks.toString());
var bulkResponse = client().performRequest(bulkRequest);
assertOK(bulkResponse);
assertThat(entityAsMap(bulkResponse).get("errors"), allOf(notNullValue(), is(false)));
indexDocs(index, numDocs);

logger.debug("--> creating snapshot [{}]", snapshot);
createSnapshot(client(), repository, snapshot, true);
Expand All @@ -83,7 +65,7 @@ public void testRestoreIndex() throws Exception {
if (VERSION_MINUS_1.equals(clusterVersion())) {
ensureGreen(index);

assertThat(indexLuceneVersion(index), equalTo(VERSION_MINUS_2));
assertThat(indexVersion(index), equalTo(VERSION_MINUS_2));
assertDocCount(client(), index, numDocs);

logger.debug("--> deleting index [{}]", index);
Expand All @@ -106,9 +88,19 @@ public void testRestoreIndex() throws Exception {
"rename_replacement": "%s",
"include_aliases": false
}""", index, restoredIndex));
var responseBody = createFromResponse(client().performRequest(request));
assertThat(responseBody.evaluate("snapshot.shards.total"), equalTo((int) responseBody.evaluate("snapshot.shards.failed")));
assertThat(responseBody.evaluate("snapshot.shards.successful"), equalTo(0));
var responseException = expectThrows(ResponseException.class, () -> client().performRequest(request));
assertEquals(RestStatus.INTERNAL_SERVER_ERROR.getStatus(), responseException.getResponse().getStatusLine().getStatusCode());
assertThat(
responseException.getMessage(),
allOf(
containsString("cannot restore index"),
containsString("because it cannot be upgraded"),
containsString("has current compatibility version [" + VERSION_MINUS_2 + '-' + VERSION_MINUS_1.getMajor() + ".0.0]"),
containsString("but the minimum compatible version is [" + VERSION_MINUS_1.getMajor() + ".0.0]."),
containsString("It should be re-indexed in Elasticsearch " + VERSION_MINUS_1.getMajor() + ".x"),
containsString("before upgrading to " + VERSION_CURRENT)
)
);
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,8 @@
import org.elasticsearch.repositories.fs.FsRepository;
import org.elasticsearch.test.cluster.util.Version;

import java.util.stream.IntStream;

import static org.elasticsearch.test.rest.ObjectPath.createFromResponse;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;

public class SearchableSnapshotCompatibilityIT extends AbstractLuceneIndexCompatibilityTestCase {

Expand All @@ -46,13 +41,7 @@ public void testSearchableSnapshot() throws Exception {
final int numDocs = 1234;

logger.debug("--> registering repository [{}]", repository);
registerRepository(
client(),
repository,
FsRepository.TYPE,
true,
Settings.builder().put("location", REPOSITORY_PATH.getRoot().getPath()).build()
);
registerRepository(client(), repository, FsRepository.TYPE, true, repositorySettings());

if (VERSION_MINUS_2.equals(clusterVersion())) {
logger.debug("--> creating index [{}]", index);
Expand All @@ -67,17 +56,7 @@ public void testSearchableSnapshot() throws Exception {
);

logger.debug("--> indexing [{}] docs in [{}]", numDocs, index);
final var bulks = new StringBuilder();
IntStream.range(0, numDocs).forEach(n -> bulks.append(Strings.format("""
{"index":{"_id":"%s","_index":"%s"}}
{"test":"test"}
""", n, index)));

var bulkRequest = new Request("POST", "/_bulk");
bulkRequest.setJsonEntity(bulks.toString());
var bulkResponse = client().performRequest(bulkRequest);
assertOK(bulkResponse);
assertThat(entityAsMap(bulkResponse).get("errors"), allOf(notNullValue(), is(false)));
indexDocs(index, numDocs);

logger.debug("--> creating snapshot [{}]", snapshot);
createSnapshot(client(), repository, snapshot, true);
Expand All @@ -87,7 +66,7 @@ public void testSearchableSnapshot() throws Exception {
if (VERSION_MINUS_1.equals(clusterVersion())) {
ensureGreen(index);

assertThat(indexLuceneVersion(index), equalTo(VERSION_MINUS_2));
assertThat(indexVersion(index), equalTo(VERSION_MINUS_2));
assertDocCount(client(), index, numDocs);

logger.debug("--> deleting index [{}]", index);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,7 @@
import java.util.function.Function;
import java.util.stream.Collectors;

import static org.elasticsearch.cluster.metadata.IndexMetadataVerifier.isReadOnlySupportedVersion;
import static org.elasticsearch.gateway.GatewayService.STATE_NOT_RECOVERED_BLOCK;

public class NodeJoinExecutor implements ClusterStateTaskExecutor<JoinTask> {
Expand Down Expand Up @@ -179,8 +180,12 @@ public ClusterState execute(BatchExecutionContext<JoinTask> batchExecutionContex
Set<String> newNodeEffectiveFeatures = enforceNodeFeatureBarrier(node, effectiveClusterFeatures, features);
// we do this validation quite late to prevent race conditions between nodes joining and importing dangling indices
// we have to reject nodes that don't support all indices we have in this cluster
ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), initialState.getMetadata());

ensureIndexCompatibility(
node.getMinIndexVersion(),
node.getMinReadOnlyIndexVersion(),
node.getMaxIndexVersion(),
initialState.getMetadata()
);
nodesBuilder.add(node);
compatibilityVersionsMap.put(node.getId(), compatibilityVersions);
// store the actual node features here, not including assumed features, as this is persisted in cluster state
Expand Down Expand Up @@ -396,7 +401,12 @@ private Set<String> calculateEffectiveClusterFeatures(DiscoveryNodes nodes, Map<
* @see IndexVersions#MINIMUM_COMPATIBLE
* @throws IllegalStateException if any index is incompatible with the given version
*/
public static void ensureIndexCompatibility(IndexVersion minSupportedVersion, IndexVersion maxSupportedVersion, Metadata metadata) {
public static void ensureIndexCompatibility(
IndexVersion minSupportedVersion,
IndexVersion minReadOnlySupportedVersion,
IndexVersion maxSupportedVersion,
Metadata metadata
) {
// we ensure that all indices in the cluster we join are compatible with us no matter if they are
// closed or not we can't read mappings of these indices so we need to reject the join...
for (IndexMetadata idxMetadata : metadata) {
Expand All @@ -411,14 +421,17 @@ public static void ensureIndexCompatibility(IndexVersion minSupportedVersion, In
);
}
if (idxMetadata.getCompatibilityVersion().before(minSupportedVersion)) {
throw new IllegalStateException(
"index "
+ idxMetadata.getIndex()
+ " version not supported: "
+ idxMetadata.getCompatibilityVersion().toReleaseVersion()
+ " minimum compatible index version is: "
+ minSupportedVersion.toReleaseVersion()
);
boolean isReadOnlySupported = isReadOnlySupportedVersion(idxMetadata, minSupportedVersion, minReadOnlySupportedVersion);
if (isReadOnlySupported == false) {
throw new IllegalStateException(
"index "
+ idxMetadata.getIndex()
+ " version not supported: "
+ idxMetadata.getCompatibilityVersion().toReleaseVersion()
+ " minimum compatible index version is: "
+ minSupportedVersion.toReleaseVersion()
);
}
}
}
}
Expand Down Expand Up @@ -542,7 +555,12 @@ public static Collection<BiConsumer<DiscoveryNode, ClusterState>> addBuiltInJoin
final Collection<BiConsumer<DiscoveryNode, ClusterState>> validators = new ArrayList<>();
validators.add((node, state) -> {
ensureNodesCompatibility(node.getVersion(), state.getNodes());
ensureIndexCompatibility(node.getMinIndexVersion(), node.getMaxIndexVersion(), state.getMetadata());
ensureIndexCompatibility(
node.getMinIndexVersion(),
node.getMinReadOnlyIndexVersion(),
node.getMaxIndexVersion(),
state.getMetadata()
);
});
validators.addAll(onJoinValidators);
return Collections.unmodifiableCollection(validators);
Expand Down
Loading