From 53076ab025d4165f577cf0e8526e0dfde81af72a Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Mon, 27 Oct 2025 17:23:49 +0000 Subject: [PATCH 01/13] Replace INDEX_METADATA_FORMAT with INDEX_SHARD_COUNT_FORMAT Replaces the existing ChecksumBlobStoreFormat INDEX_METADATA_FORMAT inside BlobStoreRepository with a new INDEX_SHARD_COUNT_FORMAT. This removes the need for writing an entire IndexMetadata object to, and reading from, heap memory. Instead, we write to and read from heap only the shard count for an index, reducing the total heap memory needed for snapshotting and reducing the likelihood a node will go OOMe. Closes ES-12539 Closes ES-12538 --- ...BlobStoreRepositoryOperationPurposeIT.java | 1 + .../cluster/metadata/IndexShardCount.java | 97 +++++++++++++++++++ .../blobstore/BlobStoreRepository.java | 35 ++++++- ...bStoreRepositoryDeleteThrottlingTests.java | 4 +- .../blobstore/RepositoryFileType.java | 1 + 5 files changed, 132 insertions(+), 6 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java index 51ec2d12b90c5..30eed7a543ec2 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java @@ -253,6 +253,7 @@ private static void assertPurposeConsistency(OperationPurpose purpose, String bl startsWith(BlobStoreRepository.INDEX_FILE_PREFIX), startsWith(BlobStoreRepository.METADATA_PREFIX), startsWith(BlobStoreRepository.SNAPSHOT_PREFIX), + startsWith(BlobStoreRepository.SHARD_COUNT_PREFIX), equalTo(BlobStoreRepository.INDEX_LATEST_BLOB), // verification 
equalTo("master.dat"), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java new file mode 100644 index 0000000000000..faa5c4e8283bc --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java @@ -0,0 +1,97 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.xcontent.ToXContentFragment; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +/** + * Used to store the shard count for an index + * Prior to v9.3, the entire {@link IndexMetadata} object was stored in heap and then loaded during snapshotting to determine + * the shard count. As per ES-12539, this was replaced with {@link IndexShardCount} that tracks writes and loads only the index's + * shard count to and from heap memory. This not only reduces the likelihood of a node going OOMe, but increases snapshotting performance. 
+ */ +public class IndexShardCount implements ToXContentFragment { + private static final String KEY_SHARD_COUNT = "shard_count"; + private final int shardCount; + + public IndexShardCount(int count) { + this.shardCount = count; + } + + public int getCount() { + return shardCount; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(KEY_SHARD_COUNT, shardCount); + return builder; + } + + public static Builder builder() { + return new Builder(); + } + + public static class Builder { + private int count; + + public Builder setCount(int count) { + this.count = count; + return this; + } + + public IndexShardCount build() { + return new IndexShardCount(count); + } + } + + public static IndexShardCount fromXContent(XContentParser parser) throws IOException { + if (parser.currentToken() == null) { // fresh parser? move to the first token + parser.nextToken(); + } + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token + parser.nextToken(); + } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); + XContentParser.Token currentToken = parser.nextToken(); + IndexShardCount x; + if (currentToken.isValue()) { + x = new IndexShardCount(parser.intValue()); + } else { + throw new IllegalArgumentException("Unexpected token " + currentToken); + } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); + return x; + } + + // public static final String SHARD_COUNT_FILE_PREFIX = "shard-count-"; + // /** + // * State format for {@link IndexMetadata} to only write shard count to and load from disk + // */ + // public static final MetadataStateFormat FORMAT = + // new MetadataStateFormat(SHARD_COUNT_FILE_PREFIX) { + // @Override + // public void toXContent(XContentBuilder builder, IndexShardCount indexShardCount) throws IOException { + // 
builder.startObject(); + // builder.field(KEY_SHARD_COUNT, indexShardCount.getCount()); + // builder.endObject(); + // } + // + // @Override + // public IndexShardCount fromXContent(XContentParser parser) throws IOException { + // return IndexShardCount.fromXContent(parser); + // } + // }; +} diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index fdac63cc5466c..3dcdb8460ed05 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -39,6 +39,7 @@ import org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexShardCount; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.ProjectId; import org.elasticsearch.cluster.metadata.ProjectMetadata; @@ -240,6 +241,12 @@ private class ShutdownLogger { */ public static final String METADATA_PREFIX = "meta-"; + /** + * Name prefix for the blobs that hold the global {@link IndexShardCount} for each index + * @see #SHARD_COUNT_NAME_FORMAT + */ + public static final String SHARD_COUNT_PREFIX = "shard-count-"; + /** * Name prefix for the blobs that hold top-level {@link SnapshotInfo} and shard-level {@link BlobStoreIndexShardSnapshot} metadata. * @see #SNAPSHOT_NAME_FORMAT @@ -260,6 +267,12 @@ private class ShutdownLogger { */ public static final String METADATA_NAME_FORMAT = METADATA_PREFIX + "%s" + METADATA_BLOB_NAME_SUFFIX; + /** + * Blob name format for global {@link IndexShardCount} blobs. 
+ * @see #INDEX_SHARD_COUNT_FORMAT + */ + public static final String SHARD_COUNT_NAME_FORMAT = SHARD_COUNT_PREFIX + "%s" + METADATA_BLOB_NAME_SUFFIX; + /** * Blob name format for top-level {@link SnapshotInfo} and shard-level {@link BlobStoreIndexShardSnapshot} blobs. * @see #SNAPSHOT_FORMAT @@ -398,6 +411,13 @@ public static String getRepositoryDataBlobName(long repositoryGeneration) { Function.identity() ); + public static final ChecksumBlobStoreFormat INDEX_SHARD_COUNT_FORMAT = new ChecksumBlobStoreFormat<>( + "index-shard-count", + SHARD_COUNT_NAME_FORMAT, + (repoName, parser) -> IndexShardCount.fromXContent(parser), + Function.identity() + ); + private static final String SNAPSHOT_CODEC = "snapshot"; public static final ChecksumBlobStoreFormat SNAPSHOT_FORMAT = new ChecksumBlobStoreFormat<>( @@ -1327,13 +1347,14 @@ private void determineShardCount(ActionListener listener) { private void getOneShardCount(String indexMetaGeneration) { try { updateShardCount( - INDEX_METADATA_FORMAT.read(getProjectRepo(), indexContainer, indexMetaGeneration, namedXContentRegistry) - .getNumberOfShards() + INDEX_SHARD_COUNT_FORMAT.read(getProjectRepo(), indexContainer, indexMetaGeneration, namedXContentRegistry) + .getCount() ); } catch (Exception ex) { - logger.warn(() -> format("[%s] [%s] failed to read metadata for index", indexMetaGeneration, indexId.getName()), ex); + logger.warn(() -> format("[%s] [%s] failed to read shard count for index", indexMetaGeneration, indexId.getName()), ex); // Definitely indicates something fairly badly wrong with the repo, but not immediately fatal here: we might get the - // shard count from another metadata blob, or we might just not process these shards. If we skip these shards then the + // shard count from a subsequent indexMetaGeneration, or we might just not process these shards. 
If we skip these shards + // then the // repository will technically enter an invalid state (these shards' index-XXX blobs will refer to snapshots that no // longer exist) and may contain dangling blobs too. A subsequent delete that hits this index may repair the state if // the metadata read error is transient, but if not then the stale indices cleanup will eventually remove this index @@ -1904,21 +1925,25 @@ record RootBlobUpdateResult(RepositoryData oldRepositoryData, RepositoryData new } })); - // Write the index metadata for each index in the snapshot + // Write the index metadata and the shard count to memory for each index in the snapshot, so that it persists + // even if the repository is deleted for (IndexId index : indices) { executor.execute(ActionRunnable.run(allMetaListeners.acquire(), () -> { final IndexMetadata indexMetaData = projectMetadata.index(index.getName()); + final IndexShardCount indexShardCount = new IndexShardCount(indexMetaData.getNumberOfShards()); if (writeIndexGens) { final String identifiers = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetaData); String metaUUID = existingRepositoryData.indexMetaDataGenerations().getIndexMetaBlobId(identifiers); if (metaUUID == null) { // We don't yet have this version of the metadata so we write it metaUUID = UUIDs.base64UUID(); + INDEX_SHARD_COUNT_FORMAT.write(indexShardCount, indexContainer(index), metaUUID, compress); INDEX_METADATA_FORMAT.write(indexMetaData, indexContainer(index), metaUUID, compress); metadataWriteResult.indexMetaIdentifiers().put(identifiers, metaUUID); } // else this task was largely a no-op - TODO no need to fork in that case metadataWriteResult.indexMetas().put(index, identifiers); } else { + INDEX_SHARD_COUNT_FORMAT.write(indexShardCount, indexContainer(index), snapshotId.getUUID(), compress); INDEX_METADATA_FORMAT.write( clusterMetadata.getProject(getProjectId()).index(index.getName()), indexContainer(index), diff --git 
a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java index 0d8011830dcec..082754f55f1a3 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java @@ -133,7 +133,9 @@ protected BlobContainer wrapChild(BlobContainer child) { @Override public InputStream readBlob(OperationPurpose purpose, String blobName) throws IOException { final var pathParts = path().parts(); - if (pathParts.size() == 2 && pathParts.get(0).equals("indices") && blobName.startsWith(BlobStoreRepository.METADATA_PREFIX)) { + if (pathParts.size() == 2 + && pathParts.get(0).equals("indices") + && blobName.startsWith(BlobStoreRepository.SHARD_COUNT_PREFIX)) { // reading index metadata, so mark index as active assertTrue(activeIndices.add(pathParts.get(1))); assertThat(activeIndices.size(), lessThanOrEqualTo(MAX_SNAPSHOT_THREADS)); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java index a30d14efb5d39..cb1ba8875fa88 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java @@ -25,6 +25,7 @@ public enum RepositoryFileType { SNAPSHOT_INFO("snap-UUID.dat"), GLOBAL_METADATA("meta-UUID.dat"), INDEX_METADATA("indices/UUID/meta-SHORTUUID.dat"), + INDEX_SHARD_COUNT("indices/UUID/shard-count-SHORTUUID.dat"), SHARD_GENERATION("indices/UUID/NUM/index-UUID"), SHARD_SNAPSHOT_INFO("indices/UUID/NUM/snap-UUID.dat"), SHARD_DATA("indices/UUID/NUM/__UUID"), From 
ddd46e53dd529c1ab16d37064729c657bf046289 Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Mon, 27 Oct 2025 17:28:16 +0000 Subject: [PATCH 02/13] Remove unused code --- .../cluster/metadata/IndexShardCount.java | 19 ------------------- 1 file changed, 19 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java index faa5c4e8283bc..286a31005a5f8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java @@ -75,23 +75,4 @@ public static IndexShardCount fromXContent(XContentParser parser) throws IOExcep XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); return x; } - - // public static final String SHARD_COUNT_FILE_PREFIX = "shard-count-"; - // /** - // * State format for {@link IndexMetadata} to only write shard count to and load from disk - // */ - // public static final MetadataStateFormat FORMAT = - // new MetadataStateFormat(SHARD_COUNT_FILE_PREFIX) { - // @Override - // public void toXContent(XContentBuilder builder, IndexShardCount indexShardCount) throws IOException { - // builder.startObject(); - // builder.field(KEY_SHARD_COUNT, indexShardCount.getCount()); - // builder.endObject(); - // } - // - // @Override - // public IndexShardCount fromXContent(XContentParser parser) throws IOException { - // return IndexShardCount.fromXContent(parser); - // } - // }; } From 9818db7c4e8f943ff1655d72898afb00b0821d25 Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Mon, 27 Oct 2025 17:30:07 +0000 Subject: [PATCH 03/13] Update docs/changelog/137210.yaml --- docs/changelog/137210.yaml | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 docs/changelog/137210.yaml diff --git a/docs/changelog/137210.yaml b/docs/changelog/137210.yaml new file mode 100644 index 
0000000000000..bdd27bdeb0e5b --- /dev/null +++ b/docs/changelog/137210.yaml @@ -0,0 +1,5 @@ +pr: 137210 +summary: "[WIP] Introduce INDEX_SHARD_COUNT_FORMAT" +area: Snapshot/Restore +type: bug +issues: [] From 133f3f24f31faa2c3ba6e2916ba46837662d3410 Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Tue, 28 Oct 2025 16:20:17 +0000 Subject: [PATCH 04/13] Introduces a new reader to only read the shard count --- ...BlobStoreRepositoryOperationPurposeIT.java | 1 - .../cluster/metadata/IndexMetadata.java | 123 +++++++- .../cluster/metadata/IndexShardCount.java | 78 ----- .../blobstore/BlobStoreRepository.java | 25 +- .../cluster/metadata/IndexMetadataTests.java | 291 +++++++++++------- ...bStoreRepositoryDeleteThrottlingTests.java | 4 +- .../blobstore/RepositoryFileType.java | 1 - 7 files changed, 307 insertions(+), 216 deletions(-) delete mode 100644 server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java diff --git a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java index 30eed7a543ec2..51ec2d12b90c5 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryOperationPurposeIT.java @@ -253,7 +253,6 @@ private static void assertPurposeConsistency(OperationPurpose purpose, String bl startsWith(BlobStoreRepository.INDEX_FILE_PREFIX), startsWith(BlobStoreRepository.METADATA_PREFIX), startsWith(BlobStoreRepository.SNAPSHOT_PREFIX), - startsWith(BlobStoreRepository.SHARD_COUNT_PREFIX), equalTo(BlobStoreRepository.INDEX_LATEST_BLOB), // verification equalTo("master.dat"), diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java 
b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 38d0fada7d866..7b6aa1f446c3f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -2881,13 +2881,7 @@ public static IndexMetadata legacyFromXContent(XContentParser parser) throws IOE } else if (token == XContentParser.Token.START_OBJECT) { if ("settings".equals(currentFieldName)) { Settings settings = Settings.fromXContent(parser); - if (SETTING_INDEX_VERSION_COMPATIBILITY.get(settings).isLegacyIndexVersion() == false) { - throw new IllegalStateException( - "this method should only be used to parse older incompatible index metadata versions " - + "but got " - + SETTING_INDEX_VERSION_COMPATIBILITY.get(settings).toReleaseVersion() - ); - } + checkSettingIndexVersionCompatibility(settings); builder.settings(settings); } else if ("mappings".equals(currentFieldName)) { Map mappingSourceBuilder = new HashMap<>(); @@ -2980,6 +2974,16 @@ private static void handleLegacyMapping(Builder builder, Map map } } + private static void checkSettingIndexVersionCompatibility(Settings settings) { + if (SETTING_INDEX_VERSION_COMPATIBILITY.get(settings).isLegacyIndexVersion() == false) { + throw new IllegalStateException( + "this method should only be used to parse older incompatible index metadata versions " + + "but got " + + SETTING_INDEX_VERSION_COMPATIBILITY.get(settings).toReleaseVersion() + ); + } + } + /** * Return the {@link IndexVersion} of Elasticsearch that has been used to create an index given its settings. 
* @@ -3228,4 +3232,109 @@ public static int parseIndexNameCounter(String indexName) { throw new IllegalArgumentException("unable to parse the index name [" + indexName + "] to extract the counter", e); } } + + /** + * A subset of {@link IndexMetadata} storing only the shard count of an index + * Prior to v9.3, the entire {@link IndexMetadata} object was stored in heap and then loaded during snapshotting to determine + * the shard count. As per ES-12539, this is replaced with the {@link IndexShardCount} class that writes and loads only the index's + * shard count to and from heap memory, reducing the possibility of smaller nodes going OOMe during snapshotting + */ + public static class IndexShardCount implements ToXContentFragment { + private static final String KEY_SHARD_COUNT = "shard_count"; + private final int shardCount; + + public IndexShardCount(int count) { + this.shardCount = count; + } + + public int getCount() { + return shardCount; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.field(KEY_SHARD_COUNT, shardCount); + return builder; + } + + public static IndexShardCount.Builder builder() { + return new IndexShardCount.Builder(); + } + + public static class Builder { + private int count; + + public IndexShardCount.Builder setCount(int count) { + this.count = count; + return this; + } + + public IndexShardCount build() { + return new IndexShardCount(count); + } + } + + /** + * Parses an {@link IndexMetadata} object, reading only the shard count and skipping the rest + * @param parser The parser of the {@link IndexMetadata} object + * @return Returns an {@link IndexShardCount} containing the shard count for the index + * @throws IOException Thrown if the {@link IndexMetadata} object cannot be parsed correctly + */ + public static IndexShardCount fromIndexMetaData(XContentParser parser) throws IOException { + return fromIndexMetaData(parser, false); + } + + public static 
IndexShardCount fromIndexMetaData(XContentParser parser, boolean legacy) throws IOException { + if (parser.currentToken() == null) { // fresh parser? move to the first token + parser.nextToken(); + } + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token + parser.nextToken(); + } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); + String currentFieldName; + XContentParser.Token token = parser.nextToken(); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + + Builder indexShardCountBuilder = new Builder(); + // Skip over everything except the settings object we care about, or any unexpected tokens + while ((currentFieldName = parser.nextFieldName()) != null) { + token = parser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + if (currentFieldName.equals(KEY_SETTINGS)) { + Settings settings = Settings.fromXContent(parser); + if (legacy) { + checkSettingIndexVersionCompatibility(settings); + } + indexShardCountBuilder.setCount(settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1)); + } else { + // Iterate through the object, but we don't care for it's contents + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + } + } + } else if (token == XContentParser.Token.START_ARRAY) { + // Iterate through the array, but we don't care for it's contents + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + } + } else if (token.isValue() == false) { + throw new IllegalArgumentException("Unexpected token " + token); + } + } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); + return indexShardCountBuilder.build(); + } + + /** + * Parses legacy metadata from ES versions that are no longer index-compatible, returning information on best-effort basis. + *

+ * Like {@link #fromIndexMetaData}, this method parses an {@link IndexMetadata} object, + * reading only the shard count and skipping the rest. + *

+ * Throws an exception if the metadata is index-compatible with the current version (in that case, + * {@link #fromXContent} should be used to load the content. + */ + public static IndexShardCount fromLegacyIndexMetaData(XContentParser parser) throws IOException { + return fromIndexMetaData(parser, true); + } + } } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java deleted file mode 100644 index 286a31005a5f8..0000000000000 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexShardCount.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". - */ - -package org.elasticsearch.cluster.metadata; - -import org.elasticsearch.common.xcontent.XContentParserUtils; -import org.elasticsearch.xcontent.ToXContentFragment; -import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentParser; - -import java.io.IOException; - -/** - * Used to store the shard count for an index - * Prior to v9.3, the entire {@link IndexMetadata} object was stored in heap and then loaded during snapshotting to determine - * the shard count. As per ES-12539, this was replaced with {@link IndexShardCount} that tracks writes and loads only the index's - * shard count to and from heap memory. This not only reduces the likelihood of a node going OOMe, but increases snapshotting performance. 
- */ -public class IndexShardCount implements ToXContentFragment { - private static final String KEY_SHARD_COUNT = "shard_count"; - private final int shardCount; - - public IndexShardCount(int count) { - this.shardCount = count; - } - - public int getCount() { - return shardCount; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(KEY_SHARD_COUNT, shardCount); - return builder; - } - - public static Builder builder() { - return new Builder(); - } - - public static class Builder { - private int count; - - public Builder setCount(int count) { - this.count = count; - return this; - } - - public IndexShardCount build() { - return new IndexShardCount(count); - } - } - - public static IndexShardCount fromXContent(XContentParser parser) throws IOException { - if (parser.currentToken() == null) { // fresh parser? move to the first token - parser.nextToken(); - } - if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token - parser.nextToken(); - } - XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); - XContentParser.Token currentToken = parser.nextToken(); - IndexShardCount x; - if (currentToken.isValue()) { - x = new IndexShardCount(parser.intValue()); - } else { - throw new IllegalArgumentException("Unexpected token " + currentToken); - } - XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); - return x; - } -} diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 3dcdb8460ed05..5fe23df2283f1 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -39,7 +39,6 @@ import 
org.elasticsearch.cluster.SnapshotDeletionsInProgress; import org.elasticsearch.cluster.SnapshotsInProgress; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.cluster.metadata.IndexShardCount; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.ProjectId; import org.elasticsearch.cluster.metadata.ProjectMetadata; @@ -241,12 +240,6 @@ private class ShutdownLogger { */ public static final String METADATA_PREFIX = "meta-"; - /** - * Name prefix for the blobs that hold the global {@link IndexShardCount} for each index - * @see #SHARD_COUNT_NAME_FORMAT - */ - public static final String SHARD_COUNT_PREFIX = "shard-count-"; - /** * Name prefix for the blobs that hold top-level {@link SnapshotInfo} and shard-level {@link BlobStoreIndexShardSnapshot} metadata. * @see #SNAPSHOT_NAME_FORMAT @@ -267,12 +260,6 @@ private class ShutdownLogger { */ public static final String METADATA_NAME_FORMAT = METADATA_PREFIX + "%s" + METADATA_BLOB_NAME_SUFFIX; - /** - * Blob name format for global {@link IndexShardCount} blobs. - * @see #INDEX_SHARD_COUNT_FORMAT - */ - public static final String SHARD_COUNT_NAME_FORMAT = SHARD_COUNT_PREFIX + "%s" + METADATA_BLOB_NAME_SUFFIX; - /** * Blob name format for top-level {@link SnapshotInfo} and shard-level {@link BlobStoreIndexShardSnapshot} blobs. 
* @see #SNAPSHOT_FORMAT @@ -411,10 +398,11 @@ public static String getRepositoryDataBlobName(long repositoryGeneration) { Function.identity() ); - public static final ChecksumBlobStoreFormat INDEX_SHARD_COUNT_FORMAT = new ChecksumBlobStoreFormat<>( - "index-shard-count", - SHARD_COUNT_NAME_FORMAT, - (repoName, parser) -> IndexShardCount.fromXContent(parser), + public static final ChecksumBlobStoreFormat INDEX_SHARD_COUNT_FORMAT = new ChecksumBlobStoreFormat<>( + "index-metadata", + METADATA_NAME_FORMAT, + (repoName, parser) -> IndexMetadata.IndexShardCount.fromLegacyIndexMetaData(parser), + (repoName, parser) -> IndexMetadata.IndexShardCount.fromIndexMetaData(parser), Function.identity() ); @@ -1930,20 +1918,17 @@ record RootBlobUpdateResult(RepositoryData oldRepositoryData, RepositoryData new for (IndexId index : indices) { executor.execute(ActionRunnable.run(allMetaListeners.acquire(), () -> { final IndexMetadata indexMetaData = projectMetadata.index(index.getName()); - final IndexShardCount indexShardCount = new IndexShardCount(indexMetaData.getNumberOfShards()); if (writeIndexGens) { final String identifiers = IndexMetaDataGenerations.buildUniqueIdentifier(indexMetaData); String metaUUID = existingRepositoryData.indexMetaDataGenerations().getIndexMetaBlobId(identifiers); if (metaUUID == null) { // We don't yet have this version of the metadata so we write it metaUUID = UUIDs.base64UUID(); - INDEX_SHARD_COUNT_FORMAT.write(indexShardCount, indexContainer(index), metaUUID, compress); INDEX_METADATA_FORMAT.write(indexMetaData, indexContainer(index), metaUUID, compress); metadataWriteResult.indexMetaIdentifiers().put(identifiers, metaUUID); } // else this task was largely a no-op - TODO no need to fork in that case metadataWriteResult.indexMetas().put(index, identifiers); } else { - INDEX_SHARD_COUNT_FORMAT.write(indexShardCount, indexContainer(index), snapshotId.getUUID(), compress); INDEX_METADATA_FORMAT.write( 
clusterMetadata.getProject(getProjectId()).index(index.getName()), indexContainer(index), diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index d0aae463dd193..50d0521111335 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -41,6 +41,7 @@ import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; @@ -55,6 +56,8 @@ import java.util.Set; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_HIDDEN_SETTING; +import static org.elasticsearch.cluster.metadata.IndexMetadata.KEY_SETTINGS; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.cluster.metadata.IndexMetadata.parseIndexNameCounter; import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SNAPSHOT_PARTIAL_SETTING; @@ -85,55 +88,9 @@ protected NamedXContentRegistry xContentRegistry() { @SuppressForbidden(reason = "Use IndexMetadata#getForecastedWriteLoad to ensure that the serialized value is correct") public void testIndexMetadataSerialization() throws IOException { - Integer numShard = randomFrom(1, 2, 4, 8, 16); - int numberOfReplicas = randomIntBetween(0, 10); final boolean system = randomBoolean(); - Map customMap = new HashMap<>(); - customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); - customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); - 
IndexVersion mappingsUpdatedVersion = IndexVersionUtils.randomVersion(); - IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numShard) : null; - Double indexWriteLoadForecast = randomBoolean() ? randomDoubleBetween(0.0, 128, true) : null; - Long shardSizeInBytesForecast = randomBoolean() ? randomLongBetween(1024, 10240) : null; - Map inferenceFields = randomInferenceFields(); - IndexReshardingMetadata reshardingMetadata = randomBoolean() ? randomIndexReshardingMetadata(numShard) : null; - - IndexMetadata metadata = IndexMetadata.builder("foo") - .settings(indexSettings(numShard, numberOfReplicas).put("index.version.created", 1)) - .creationDate(randomLong()) - .primaryTerm(0, 2) - .setRoutingNumShards(32) - .system(system) - .putCustom("my_custom", customMap) - .putRolloverInfo( - new RolloverInfo( - randomAlphaOfLength(5), - List.of( - new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), - new MaxDocsCondition(randomNonNegativeLong()), - new MaxSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), - new MaxPrimaryShardSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), - new MaxPrimaryShardDocsCondition(randomNonNegativeLong()), - new OptimalShardCountCondition(3) - ), - randomNonNegativeLong() - ) - ) - .mappingsUpdatedVersion(mappingsUpdatedVersion) - .stats(indexStats) - .indexWriteLoadForecast(indexWriteLoadForecast) - .shardSizeInBytesForecast(shardSizeInBytesForecast) - .putInferenceFields(inferenceFields) - .eventIngestedRange( - randomFrom( - IndexLongFieldRange.UNKNOWN, - IndexLongFieldRange.EMPTY, - IndexLongFieldRange.NO_SHARDS, - IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(5000000, 5500000)) - ) - ) - .reshardingMetadata(reshardingMetadata) - .build(); + Map customMap = randomCustomMap(); + IndexMetadata metadata = randomIndexMetadata(system, customMap); assertEquals(system, metadata.isSystem()); final XContentBuilder builder = JsonXContent.contentBuilder(); @@ 
-201,51 +158,9 @@ public void testIndexMetadataSerialization() throws IOException { } public void testIndexMetadataFromXContentParsingWithoutEventIngestedField() throws IOException { - Integer numShard = randomFrom(1, 2, 4, 8, 16); - int numberOfReplicas = randomIntBetween(0, 10); final boolean system = randomBoolean(); - Map customMap = new HashMap<>(); - customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); - customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); - IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numShard) : null; - Double indexWriteLoadForecast = randomBoolean() ? randomDoubleBetween(0.0, 128, true) : null; - Long shardSizeInBytesForecast = randomBoolean() ? randomLongBetween(1024, 10240) : null; - Map inferenceFields = randomInferenceFields(); - - IndexMetadata metadata = IndexMetadata.builder("foo") - .settings(indexSettings(numShard, numberOfReplicas).put("index.version.created", 1)) - .creationDate(randomLong()) - .primaryTerm(0, 2) - .setRoutingNumShards(32) - .system(system) - .putCustom("my_custom", customMap) - .putRolloverInfo( - new RolloverInfo( - randomAlphaOfLength(5), - List.of( - new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), - new MaxDocsCondition(randomNonNegativeLong()), - new MaxSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), - new MaxPrimaryShardSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), - new MaxPrimaryShardDocsCondition(randomNonNegativeLong()), - new OptimalShardCountCondition(3) - ), - randomNonNegativeLong() - ) - ) - .stats(indexStats) - .indexWriteLoadForecast(indexWriteLoadForecast) - .shardSizeInBytesForecast(shardSizeInBytesForecast) - .putInferenceFields(inferenceFields) - .eventIngestedRange( - randomFrom( - IndexLongFieldRange.UNKNOWN, - IndexLongFieldRange.EMPTY, - IndexLongFieldRange.NO_SHARDS, - IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(5000000, 5500000)) - ) - ) - .build(); 
+ Map customMap = randomCustomMap(); + IndexMetadata metadata = randomIndexMetadata(system, customMap); assertEquals(system, metadata.isSystem()); final XContentBuilder builder = JsonXContent.contentBuilder(); @@ -256,13 +171,7 @@ public void testIndexMetadataFromXContentParsingWithoutEventIngestedField() thro // convert XContent to a map and remove the IndexMetadata.KEY_EVENT_INGESTED_RANGE entry // to simulate IndexMetadata from an older cluster version (before TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) Map indexMetadataMap = XContentHelper.convertToMap(BytesReference.bytes(builder), true, XContentType.JSON).v2(); - - @SuppressWarnings("unchecked") - Map inner = (Map) indexMetadataMap.get("foo"); - assertTrue(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); - inner.remove(IndexMetadata.KEY_EVENT_INGESTED_RANGE); - // validate that the IndexMetadata.KEY_EVENT_INGESTED_RANGE has been removed before calling fromXContent - assertFalse(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); + removeEventIngestedField(indexMetadataMap); IndexMetadata fromXContentMeta; XContentParserConfiguration config = XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry()) @@ -468,7 +377,7 @@ public void testNumberOfShardsIsNotNegative() { } private void runTestNumberOfShardsIsPositive(final int numberOfShards) { - final Settings settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards).build(); + final Settings settings = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numberOfShards).build(); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> IndexMetadata.builder("test").settings(settings).build() @@ -480,10 +389,7 @@ private void runTestNumberOfShardsIsPositive(final int numberOfShards) { } public void testMissingCreatedVersion() { - Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) - .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) 
- .build(); + Settings settings = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> IndexMetadata.builder("test").settings(settings).build() @@ -492,7 +398,7 @@ public void testMissingCreatedVersion() { } public void testMissingNumberOfReplicas() { - final Settings settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 8)).build(); + final Settings settings = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 8)).build(); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> IndexMetadata.builder("test").settings(settings).build() @@ -503,7 +409,7 @@ public void testMissingNumberOfReplicas() { public void testNumberOfReplicasIsNonNegative() { final int numberOfReplicas = -randomIntBetween(1, Integer.MAX_VALUE); final Settings settings = Settings.builder() - .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 8)) + .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 8)) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) .build(); final IllegalArgumentException e = expectThrows( @@ -698,6 +604,179 @@ public void testReshardingBWCSerialization() throws IOException { assertEquals(idx, IndexMetadata.builder(deserialized).reshardingMetadata(reshardingMetadata).build()); } + public void testIndexShardCountToXContent() throws IOException { + int expectedShards = randomIntBetween(0, 100); + IndexMetadata.IndexShardCount count = new IndexMetadata.IndexShardCount(expectedShards); + XContentBuilder builder = XContentFactory.jsonBuilder(); + builder.startObject(); + count.toXContent(builder, null); + builder.endObject(); + assertTrue(Strings.toString(builder).contains("\"shard_count\":" + expectedShards)); + } + + public void testIndexShardCountBuilder() { + int expectedShards = randomIntBetween(0, 100); + 
IndexMetadata.IndexShardCount count = IndexMetadata.IndexShardCount.builder().setCount(expectedShards).build(); + assertEquals(expectedShards, count.getCount()); + } + + public void testFromIndexMetaDataWithValidIndexMetaDataObject() throws IOException { + int numberOfShards = randomFrom(1, 2, 4, 8, 16); + IndexMetadata indexMetadata = randomIndexMetadata(numberOfShards); + + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + IndexMetadata.FORMAT.toXContent(builder, indexMetadata); + builder.endObject(); + XContentParser parser = createParser(builder); + + IndexMetadata.IndexShardCount count = IndexMetadata.IndexShardCount.fromIndexMetaData(parser); + assertEquals(numberOfShards, count.getCount()); + } + + public void testFromIndexMetaDataWithValidIndexMetaDataObjectWithoutEventIngestedField() throws IOException { + int numberOfShards = randomFrom(1, 2, 4, 8, 16); + IndexMetadata indexMetadata = randomIndexMetadata(numberOfShards); + + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + IndexMetadata.FORMAT.toXContent(builder, indexMetadata); + builder.endObject(); + XContentParser parser = createParser(builder); + + // convert XContent to a map and remove the IndexMetadata.KEY_EVENT_INGESTED_RANGE entry + // to simulate IndexMetadata from an older cluster version (before TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) + Map indexMetadataMap = XContentHelper.convertToMap(BytesReference.bytes(builder), true, XContentType.JSON).v2(); + removeEventIngestedField(indexMetadataMap); + + IndexMetadata.IndexShardCount count = IndexMetadata.IndexShardCount.fromIndexMetaData(parser); + assertEquals(numberOfShards, count.getCount()); + } + + public void testFromIndexMetaDataWithoutNumberOfShardsSettingReturnsNegativeOne() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .field("my_index") + .startObject() + .startObject(KEY_SETTINGS) + // no 
shard count + .endObject() + .endObject() + .endObject(); + XContentParser parser = createParser(builder); + + IndexMetadata.IndexShardCount count = IndexMetadata.IndexShardCount.fromIndexMetaData(parser); + assertEquals(-1, count.getCount()); + } + + public void testFromLegacyIndexMetaDataWithNewCompatibleVersionThrowsException() throws IOException { + int numberOfShards = randomFrom(1, 2, 4, 8, 16); + XContentBuilder indexMetadataBuilder = buildLegacyIndexMetadata(numberOfShards, IndexVersion.current()); + XContentParser parser = createParser(indexMetadataBuilder); + assertThrows(IllegalStateException.class, () -> IndexMetadata.IndexShardCount.fromLegacyIndexMetaData(parser)); + } + + public void testFromLegacyIndexMetaDataWithOldIncompatibleVersionSucceeds() throws IOException { + int numberOfShards = randomFrom(1, 2, 4, 8, 16); + XContentBuilder indexMetadataBuilder = buildLegacyIndexMetadata( + numberOfShards, + IndexVersion.getMinimumCompatibleIndexVersion(1_000_000) + ); + XContentParser parser = createParser(indexMetadataBuilder); + IndexMetadata.IndexShardCount count = IndexMetadata.IndexShardCount.fromLegacyIndexMetaData(parser); + assertEquals(numberOfShards, count.getCount()); + } + + private XContentBuilder buildLegacyIndexMetadata(int numberOfShards, IndexVersion compatibilityVersion) throws IOException { + return XContentFactory.jsonBuilder() + .startObject() + .field("my_index") + .startObject() + .startObject(KEY_SETTINGS) + .field(SETTING_NUMBER_OF_SHARDS, numberOfShards) + .field("index.version.compatibility", compatibilityVersion) + .endObject() + .endObject() + .endObject(); + } + + private Map randomCustomMap() { + Map customMap = new HashMap<>(); + customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); + customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); + return customMap; + } + + private IndexMetadata randomIndexMetadata(int numberOfShards) { + final boolean system = randomBoolean(); + Map customMap = 
randomCustomMap(); + return randomIndexMetadata(numberOfShards, system, customMap); + } + + private IndexMetadata randomIndexMetadata(boolean system, Map customMap) { + return randomIndexMetadata(randomFrom(1, 2, 4, 8, 16), system, customMap); + } + + private IndexMetadata randomIndexMetadata(int numberOfShards, boolean system, Map customMap) { + int numberOfReplicas = randomIntBetween(0, 10); + IndexVersion mappingsUpdatedVersion = IndexVersionUtils.randomVersion(); + IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numberOfShards) : null; + Double indexWriteLoadForecast = randomBoolean() ? randomDoubleBetween(0.0, 128, true) : null; + Long shardSizeInBytesForecast = randomBoolean() ? randomLongBetween(1024, 10240) : null; + Map inferenceFields = randomInferenceFields(); + IndexReshardingMetadata reshardingMetadata = randomBoolean() ? randomIndexReshardingMetadata(numberOfShards) : null; + + return IndexMetadata.builder("foo") + .settings(indexSettings(numberOfShards, numberOfReplicas).put("index.version.created", 1)) + .creationDate(randomLong()) + .primaryTerm(0, 2) + .setRoutingNumShards(32) + .system(system) + .putCustom("my_custom", customMap) + .putRolloverInfo( + new RolloverInfo( + randomAlphaOfLength(5), + List.of( + new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), + new MaxDocsCondition(randomNonNegativeLong()), + new MaxSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), + new MaxPrimaryShardSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), + new MaxPrimaryShardDocsCondition(randomNonNegativeLong()), + new OptimalShardCountCondition(3) + ), + randomNonNegativeLong() + ) + ) + .mappingsUpdatedVersion(mappingsUpdatedVersion) + .stats(indexStats) + .indexWriteLoadForecast(indexWriteLoadForecast) + .shardSizeInBytesForecast(shardSizeInBytesForecast) + .putInferenceFields(inferenceFields) + .eventIngestedRange( + randomFrom( + IndexLongFieldRange.UNKNOWN, + IndexLongFieldRange.EMPTY, + 
IndexLongFieldRange.NO_SHARDS, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(5000000, 5500000)) + ) + ) + .reshardingMetadata(reshardingMetadata) + .build(); + } + + private void removeEventIngestedField(Map indexMetadataMap) { + // convert XContent to a map and remove the IndexMetadata.KEY_EVENT_INGESTED_RANGE entry + // to simulate IndexMetadata from an older cluster version (before TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) + // Map indexMetadataMap = XContentHelper.convertToMap(BytesReference.bytes(builder), true, XContentType.JSON).v2(); + + @SuppressWarnings("unchecked") + Map inner = (Map) indexMetadataMap.get("foo"); + assertTrue(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); + inner.remove(IndexMetadata.KEY_EVENT_INGESTED_RANGE); + // validate that the IndexMetadata.KEY_EVENT_INGESTED_RANGE has been removed before calling fromXContent + assertFalse(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); + } + private IndexMetadata roundTripWithVersion(IndexMetadata indexMetadata, TransportVersion version) throws IOException { try (BytesStreamOutput out = new BytesStreamOutput()) { out.setTransportVersion(version); diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java index 082754f55f1a3..0d8011830dcec 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryDeleteThrottlingTests.java @@ -133,9 +133,7 @@ protected BlobContainer wrapChild(BlobContainer child) { @Override public InputStream readBlob(OperationPurpose purpose, String blobName) throws IOException { final var pathParts = path().parts(); - if (pathParts.size() == 2 - && pathParts.get(0).equals("indices") - && 
blobName.startsWith(BlobStoreRepository.SHARD_COUNT_PREFIX)) { + if (pathParts.size() == 2 && pathParts.get(0).equals("indices") && blobName.startsWith(BlobStoreRepository.METADATA_PREFIX)) { // reading index metadata, so mark index as active assertTrue(activeIndices.add(pathParts.get(1))); assertThat(activeIndices.size(), lessThanOrEqualTo(MAX_SNAPSHOT_THREADS)); diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java index cb1ba8875fa88..a30d14efb5d39 100644 --- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java +++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/RepositoryFileType.java @@ -25,7 +25,6 @@ public enum RepositoryFileType { SNAPSHOT_INFO("snap-UUID.dat"), GLOBAL_METADATA("meta-UUID.dat"), INDEX_METADATA("indices/UUID/meta-SHORTUUID.dat"), - INDEX_SHARD_COUNT("indices/UUID/shard-count-SHORTUUID.dat"), SHARD_GENERATION("indices/UUID/NUM/index-UUID"), SHARD_SNAPSHOT_INFO("indices/UUID/NUM/snap-UUID.dat"), SHARD_DATA("indices/UUID/NUM/__UUID"), From a584d0f67a6ce7bfce380cb4df436e553b264c7b Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Wed, 29 Oct 2025 15:19:19 +0000 Subject: [PATCH 05/13] Comments --- .../cluster/metadata/IndexMetadata.java | 109 +------ .../blobstore/BlobStoreRepository.java | 18 +- .../blobstore/IndexShardCount.java | 69 ++++ .../cluster/metadata/IndexMetadataTests.java | 296 +++++++----------- .../blobstore/IndexShardCountTests.java | 265 ++++++++++++++++ 5 files changed, 453 insertions(+), 304 deletions(-) create mode 100644 server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java create mode 100644 server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java 
b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index 7b6aa1f446c3f..cfc5b88ef58ef 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -587,7 +587,7 @@ public Iterator> settings() { static final String KEY_SETTINGS_VERSION = "settings_version"; static final String KEY_ALIASES_VERSION = "aliases_version"; static final String KEY_ROUTING_NUM_SHARDS = "routing_num_shards"; - static final String KEY_SETTINGS = "settings"; + public static final String KEY_SETTINGS = "settings"; static final String KEY_STATE = "state"; static final String KEY_MAPPINGS = "mappings"; static final String KEY_MAPPINGS_HASH = "mappings_hash"; @@ -596,7 +596,7 @@ public Iterator> settings() { static final String KEY_MAPPINGS_UPDATED_VERSION = "mappings_updated_version"; static final String KEY_SYSTEM = "system"; static final String KEY_TIMESTAMP_RANGE = "timestamp_range"; - static final String KEY_EVENT_INGESTED_RANGE = "event_ingested_range"; + public static final String KEY_EVENT_INGESTED_RANGE = "event_ingested_range"; public static final String KEY_PRIMARY_TERMS = "primary_terms"; public static final String KEY_STATS = "stats"; @@ -3232,109 +3232,4 @@ public static int parseIndexNameCounter(String indexName) { throw new IllegalArgumentException("unable to parse the index name [" + indexName + "] to extract the counter", e); } } - - /** - * A subset of {@link IndexMetadata} storing only the shard count of an index - * Prior to v9.3, the entire {@link IndexMetadata} object was stored in heap and then loaded during snapshotting to determine - * the shard count. 
As per ES-12539, this is replaced with the {@link IndexShardCount} class that writes and loads only the index's - * shard count to and from heap memory, reducing the possibility of smaller nodes going OOMe during snapshotting - */ - public static class IndexShardCount implements ToXContentFragment { - private static final String KEY_SHARD_COUNT = "shard_count"; - private final int shardCount; - - public IndexShardCount(int count) { - this.shardCount = count; - } - - public int getCount() { - return shardCount; - } - - @Override - public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { - builder.field(KEY_SHARD_COUNT, shardCount); - return builder; - } - - public static IndexShardCount.Builder builder() { - return new IndexShardCount.Builder(); - } - - public static class Builder { - private int count; - - public IndexShardCount.Builder setCount(int count) { - this.count = count; - return this; - } - - public IndexShardCount build() { - return new IndexShardCount(count); - } - } - - /** - * Parses an {@link IndexMetadata} object, reading only the shard count and skipping the rest - * @param parser The parser of the {@link IndexMetadata} object - * @return Returns an {@link IndexShardCount} containing the shard count for the index - * @throws IOException Thrown if the {@link IndexMetadata} object cannot be parsed correctly - */ - public static IndexShardCount fromIndexMetaData(XContentParser parser) throws IOException { - return fromIndexMetaData(parser, false); - } - - public static IndexShardCount fromIndexMetaData(XContentParser parser, boolean legacy) throws IOException { - if (parser.currentToken() == null) { // fresh parser? 
move to the first token - parser.nextToken(); - } - if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token - parser.nextToken(); - } - XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); - String currentFieldName; - XContentParser.Token token = parser.nextToken(); - XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); - - Builder indexShardCountBuilder = new Builder(); - // Skip over everything except the settings object we care about, or any unexpected tokens - while ((currentFieldName = parser.nextFieldName()) != null) { - token = parser.nextToken(); - if (token == XContentParser.Token.START_OBJECT) { - if (currentFieldName.equals(KEY_SETTINGS)) { - Settings settings = Settings.fromXContent(parser); - if (legacy) { - checkSettingIndexVersionCompatibility(settings); - } - indexShardCountBuilder.setCount(settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1)); - } else { - // Iterate through the object, but we don't care for it's contents - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - } - } - } else if (token == XContentParser.Token.START_ARRAY) { - // Iterate through the array, but we don't care for it's contents - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - } - } else if (token.isValue() == false) { - throw new IllegalArgumentException("Unexpected token " + token); - } - } - XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); - return indexShardCountBuilder.build(); - } - - /** - * Parses legacy metadata from ES versions that are no longer index-compatible, returning information on best-effort basis. - *

- * Like {@link #fromIndexMetaData}, we are parsing an {@link IndexMetadata} object, - * reading only the shard count and skipping the rest. - *

- * Throws an exception if the metadata is index-compatible with the current version (in that case, - * {@link #fromXContent} should be used to load the content. - */ - public static IndexShardCount fromLegacyIndexMetaData(XContentParser parser) throws IOException { - return fromIndexMetaData(parser, true); - } - } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 5fe23df2283f1..14ccec0692904 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -398,12 +398,14 @@ public static String getRepositoryDataBlobName(long repositoryGeneration) { Function.identity() ); - public static final ChecksumBlobStoreFormat INDEX_SHARD_COUNT_FORMAT = new ChecksumBlobStoreFormat<>( - "index-metadata", + public static final ChecksumBlobStoreFormat INDEX_SHARD_COUNT_FORMAT = new ChecksumBlobStoreFormat<>( + "shard-count", METADATA_NAME_FORMAT, - (repoName, parser) -> IndexMetadata.IndexShardCount.fromLegacyIndexMetaData(parser), - (repoName, parser) -> IndexMetadata.IndexShardCount.fromIndexMetaData(parser), - Function.identity() + (repoName, parser) -> IndexShardCount.fromIndexMetaData(parser), + (ignored) -> { + assert false; + throw new UnsupportedOperationException(); + } ); private static final String SNAPSHOT_CODEC = "snapshot"; @@ -1335,8 +1337,7 @@ private void determineShardCount(ActionListener listener) { private void getOneShardCount(String indexMetaGeneration) { try { updateShardCount( - INDEX_SHARD_COUNT_FORMAT.read(getProjectRepo(), indexContainer, indexMetaGeneration, namedXContentRegistry) - .getCount() + INDEX_SHARD_COUNT_FORMAT.read(getProjectRepo(), indexContainer, indexMetaGeneration, namedXContentRegistry).count() ); } catch (Exception ex) { logger.warn(() -> format("[%s] [%s] 
failed to read shard count for index", indexMetaGeneration, indexId.getName()), ex); @@ -1913,8 +1914,7 @@ record RootBlobUpdateResult(RepositoryData oldRepositoryData, RepositoryData new } })); - // Write the index metadata and the shard count to memory for each index in the snapshot, so that it persists - // even if the repository is deleted + // Write the index metadata for each index in the snapshot for (IndexId index : indices) { executor.execute(ActionRunnable.run(allMetaListeners.acquire(), () -> { final IndexMetadata indexMetaData = projectMetadata.index(index.getName()); diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java new file mode 100644 index 0000000000000..f4795513c640c --- /dev/null +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java @@ -0,0 +1,69 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.repositories.blobstore; + +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.XContentParserUtils; +import org.elasticsearch.xcontent.XContentParser; + +import java.io.IOException; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.KEY_SETTINGS; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; + +/** + * A subset of {@link IndexMetadata} storing only the shard count of an index + * Prior to v9.3, the entire {@link IndexMetadata} object was stored in heap and then loaded during snapshotting to determine + * the shard count. As per ES-12539, this is replaced with the {@link IndexShardCount} class that writes and loads only the index's + * shard count to and from heap memory, reducing the possibility of smaller nodes going OOMe during snapshotting + */ +public record IndexShardCount(int count) { + /** + * Parses an {@link IndexMetadata} object, reading only the shard count and skipping the rest + * @param parser The parser of the {@link IndexMetadata} object + * @return Returns an {@link IndexShardCount} containing the shard count for the index + * @throws IOException Thrown if the {@link IndexMetadata} object cannot be parsed correctly + */ + public static IndexShardCount fromIndexMetaData(XContentParser parser) throws IOException { + if (parser.currentToken() == null) { // fresh parser? 
move to the first token + parser.nextToken(); + } + if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token + parser.nextToken(); + } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); + String currentFieldName; + XContentParser.Token token = parser.nextToken(); + XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); + + IndexShardCount indexShardCount = new IndexShardCount(-1); + // Skip over everything except the settings object we care about, or any unexpected tokens + while ((currentFieldName = parser.nextFieldName()) != null) { + token = parser.nextToken(); + if (token == XContentParser.Token.START_OBJECT) { + if (currentFieldName.equals(KEY_SETTINGS)) { + Settings settings = Settings.fromXContent(parser); + indexShardCount = new IndexShardCount(settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1)); + } else { + // Iterate through the object, but we don't care for its contents + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {} + } + } else if (token == XContentParser.Token.START_ARRAY) { + // Iterate through the array, but we don't care for its contents + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {} + } else if (token.isValue() == false) { + throw new IllegalArgumentException("Unexpected token " + token); + } + } + XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); + return indexShardCount; + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index 50d0521111335..39da67a81bec0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -10,6 +10,7 @@ package
org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardDocsCondition; @@ -41,7 +42,6 @@ import org.elasticsearch.test.index.IndexVersionUtils; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.XContentBuilder; -import org.elasticsearch.xcontent.XContentFactory; import org.elasticsearch.xcontent.XContentParser; import org.elasticsearch.xcontent.XContentParserConfiguration; import org.elasticsearch.xcontent.XContentType; @@ -56,8 +56,6 @@ import java.util.Set; import static org.elasticsearch.cluster.metadata.IndexMetadata.INDEX_HIDDEN_SETTING; -import static org.elasticsearch.cluster.metadata.IndexMetadata.KEY_SETTINGS; -import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.cluster.metadata.IndexMetadata.parseIndexNameCounter; import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; import static org.elasticsearch.snapshots.SearchableSnapshotsSettings.SNAPSHOT_PARTIAL_SETTING; @@ -69,8 +67,6 @@ public class IndexMetadataTests extends ESTestCase { - private static final TransportVersion ESQL_FAILURE_FROM_REMOTE = TransportVersion.fromName("esql_failure_from_remote"); - @Before public void setUp() throws Exception { super.setUp(); @@ -88,9 +84,55 @@ protected NamedXContentRegistry xContentRegistry() { @SuppressForbidden(reason = "Use IndexMetadata#getForecastedWriteLoad to ensure that the serialized value is correct") public void testIndexMetadataSerialization() throws IOException { + Integer numShard = randomFrom(1, 2, 4, 8, 16); + int numberOfReplicas = randomIntBetween(0, 10); final boolean system = randomBoolean(); - Map customMap = randomCustomMap(); - 
IndexMetadata metadata = randomIndexMetadata(system, customMap); + Map customMap = new HashMap<>(); + customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); + customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); + IndexVersion mappingsUpdatedVersion = IndexVersionUtils.randomVersion(); + IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numShard) : null; + Double indexWriteLoadForecast = randomBoolean() ? randomDoubleBetween(0.0, 128, true) : null; + Long shardSizeInBytesForecast = randomBoolean() ? randomLongBetween(1024, 10240) : null; + Map inferenceFields = randomInferenceFields(); + IndexReshardingMetadata reshardingMetadata = randomBoolean() ? randomIndexReshardingMetadata(numShard) : null; + + IndexMetadata metadata = IndexMetadata.builder("foo") + .settings(indexSettings(numShard, numberOfReplicas).put("index.version.created", 1)) + .creationDate(randomLong()) + .primaryTerm(0, 2) + .setRoutingNumShards(32) + .system(system) + .putCustom("my_custom", customMap) + .putRolloverInfo( + new RolloverInfo( + randomAlphaOfLength(5), + List.of( + new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), + new MaxDocsCondition(randomNonNegativeLong()), + new MaxSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), + new MaxPrimaryShardSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), + new MaxPrimaryShardDocsCondition(randomNonNegativeLong()), + new OptimalShardCountCondition(3) + ), + randomNonNegativeLong() + ) + ) + .mappingsUpdatedVersion(mappingsUpdatedVersion) + .stats(indexStats) + .indexWriteLoadForecast(indexWriteLoadForecast) + .shardSizeInBytesForecast(shardSizeInBytesForecast) + .putInferenceFields(inferenceFields) + .eventIngestedRange( + randomFrom( + IndexLongFieldRange.UNKNOWN, + IndexLongFieldRange.EMPTY, + IndexLongFieldRange.NO_SHARDS, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(5000000, 5500000)) + ) + ) + 
.reshardingMetadata(reshardingMetadata) + .build(); assertEquals(system, metadata.isSystem()); final XContentBuilder builder = JsonXContent.contentBuilder(); @@ -158,9 +200,51 @@ public void testIndexMetadataSerialization() throws IOException { } public void testIndexMetadataFromXContentParsingWithoutEventIngestedField() throws IOException { + Integer numShard = randomFrom(1, 2, 4, 8, 16); + int numberOfReplicas = randomIntBetween(0, 10); final boolean system = randomBoolean(); - Map customMap = randomCustomMap(); - IndexMetadata metadata = randomIndexMetadata(system, customMap); + Map customMap = new HashMap<>(); + customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); + customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); + IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numShard) : null; + Double indexWriteLoadForecast = randomBoolean() ? randomDoubleBetween(0.0, 128, true) : null; + Long shardSizeInBytesForecast = randomBoolean() ? randomLongBetween(1024, 10240) : null; + Map inferenceFields = randomInferenceFields(); + + IndexMetadata metadata = IndexMetadata.builder("foo") + .settings(indexSettings(numShard, numberOfReplicas).put("index.version.created", 1)) + .creationDate(randomLong()) + .primaryTerm(0, 2) + .setRoutingNumShards(32) + .system(system) + .putCustom("my_custom", customMap) + .putRolloverInfo( + new RolloverInfo( + randomAlphaOfLength(5), + List.of( + new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), + new MaxDocsCondition(randomNonNegativeLong()), + new MaxSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), + new MaxPrimaryShardSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), + new MaxPrimaryShardDocsCondition(randomNonNegativeLong()), + new OptimalShardCountCondition(3) + ), + randomNonNegativeLong() + ) + ) + .stats(indexStats) + .indexWriteLoadForecast(indexWriteLoadForecast) + .shardSizeInBytesForecast(shardSizeInBytesForecast) + 
.putInferenceFields(inferenceFields) + .eventIngestedRange( + randomFrom( + IndexLongFieldRange.UNKNOWN, + IndexLongFieldRange.EMPTY, + IndexLongFieldRange.NO_SHARDS, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(5000000, 5500000)) + ) + ) + .build(); assertEquals(system, metadata.isSystem()); final XContentBuilder builder = JsonXContent.contentBuilder(); @@ -171,7 +255,13 @@ public void testIndexMetadataFromXContentParsingWithoutEventIngestedField() thro // convert XContent to a map and remove the IndexMetadata.KEY_EVENT_INGESTED_RANGE entry // to simulate IndexMetadata from an older cluster version (before TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) Map indexMetadataMap = XContentHelper.convertToMap(BytesReference.bytes(builder), true, XContentType.JSON).v2(); - removeEventIngestedField(indexMetadataMap); + + @SuppressWarnings("unchecked") + Map inner = (Map) indexMetadataMap.get("foo"); + assertTrue(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); + inner.remove(IndexMetadata.KEY_EVENT_INGESTED_RANGE); + // validate that the IndexMetadata.KEY_EVENT_INGESTED_RANGE has been removed before calling fromXContent + assertFalse(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); IndexMetadata fromXContentMeta; XContentParserConfiguration config = XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry()) @@ -377,7 +467,7 @@ public void testNumberOfShardsIsNotNegative() { } private void runTestNumberOfShardsIsPositive(final int numberOfShards) { - final Settings settings = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, numberOfShards).build(); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards).build(); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> IndexMetadata.builder("test").settings(settings).build() @@ -389,7 +479,10 @@ private void runTestNumberOfShardsIsPositive(final int numberOfShards) { } 
public void testMissingCreatedVersion() { - Settings settings = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1).build(); + Settings settings = Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 1) + .build(); IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> IndexMetadata.builder("test").settings(settings).build() @@ -398,7 +491,7 @@ public void testMissingCreatedVersion() { } public void testMissingNumberOfReplicas() { - final Settings settings = Settings.builder().put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 8)).build(); + final Settings settings = Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 8)).build(); final IllegalArgumentException e = expectThrows( IllegalArgumentException.class, () -> IndexMetadata.builder("test").settings(settings).build() @@ -409,7 +502,7 @@ public void testMissingNumberOfReplicas() { public void testNumberOfReplicasIsNonNegative() { final int numberOfReplicas = -randomIntBetween(1, Integer.MAX_VALUE); final Settings settings = Settings.builder() - .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 8)) + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 8)) .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas) .build(); final IllegalArgumentException e = expectThrows( @@ -594,7 +687,7 @@ public void testReshardingBWCSerialization() throws IOException { IndexMetadata idx = IndexMetadata.builder("test").settings(settings).reshardingMetadata(reshardingMetadata).build(); // the version prior to TransportVersions.INDEX_RESHARDING_METADATA - final var version = ESQL_FAILURE_FROM_REMOTE; + final var version = TransportVersions.ESQL_FAILURE_FROM_REMOTE; // should round trip final var deserialized = roundTripWithVersion(idx, version); @@ -604,179 +697,6 @@ public void testReshardingBWCSerialization() throws IOException { 
assertEquals(idx, IndexMetadata.builder(deserialized).reshardingMetadata(reshardingMetadata).build()); } - public void testIndexShardCountToXContent() throws IOException { - int expectedShards = randomIntBetween(0, 100); - IndexMetadata.IndexShardCount count = new IndexMetadata.IndexShardCount(expectedShards); - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder.startObject(); - count.toXContent(builder, null); - builder.endObject(); - assertTrue(Strings.toString(builder).contains("\"shard_count\":" + expectedShards)); - } - - public void testIndexShardCountBuilder() { - int expectedShards = randomIntBetween(0, 100); - IndexMetadata.IndexShardCount count = IndexMetadata.IndexShardCount.builder().setCount(expectedShards).build(); - assertEquals(expectedShards, count.getCount()); - } - - public void testFromIndexMetaDataWithValidIndexMetaDataObject() throws IOException { - int numberOfShards = randomFrom(1, 2, 4, 8, 16); - IndexMetadata indexMetadata = randomIndexMetadata(numberOfShards); - - final XContentBuilder builder = JsonXContent.contentBuilder(); - builder.startObject(); - IndexMetadata.FORMAT.toXContent(builder, indexMetadata); - builder.endObject(); - XContentParser parser = createParser(builder); - - IndexMetadata.IndexShardCount count = IndexMetadata.IndexShardCount.fromIndexMetaData(parser); - assertEquals(numberOfShards, count.getCount()); - } - - public void testFromIndexMetaDataWithValidIndexMetaDataObjectWithoutEventIngestedField() throws IOException { - int numberOfShards = randomFrom(1, 2, 4, 8, 16); - IndexMetadata indexMetadata = randomIndexMetadata(numberOfShards); - - final XContentBuilder builder = JsonXContent.contentBuilder(); - builder.startObject(); - IndexMetadata.FORMAT.toXContent(builder, indexMetadata); - builder.endObject(); - XContentParser parser = createParser(builder); - - // convert XContent to a map and remove the IndexMetadata.KEY_EVENT_INGESTED_RANGE entry - // to simulate IndexMetadata from an older cluster 
version (before TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) - Map indexMetadataMap = XContentHelper.convertToMap(BytesReference.bytes(builder), true, XContentType.JSON).v2(); - removeEventIngestedField(indexMetadataMap); - - IndexMetadata.IndexShardCount count = IndexMetadata.IndexShardCount.fromIndexMetaData(parser); - assertEquals(numberOfShards, count.getCount()); - } - - public void testFromIndexMetaDataWithoutNumberOfShardsSettingReturnsNegativeOne() throws IOException { - XContentBuilder builder = XContentFactory.jsonBuilder() - .startObject() - .field("my_index") - .startObject() - .startObject(KEY_SETTINGS) - // no shard count - .endObject() - .endObject() - .endObject(); - XContentParser parser = createParser(builder); - - IndexMetadata.IndexShardCount count = IndexMetadata.IndexShardCount.fromIndexMetaData(parser); - assertEquals(-1, count.getCount()); - } - - public void testFromLegacyIndexMetaDataWithNewCompatibleVersionThrowsException() throws IOException { - int numberOfShards = randomFrom(1, 2, 4, 8, 16); - XContentBuilder indexMetadataBuilder = buildLegacyIndexMetadata(numberOfShards, IndexVersion.current()); - XContentParser parser = createParser(indexMetadataBuilder); - assertThrows(IllegalStateException.class, () -> IndexMetadata.IndexShardCount.fromLegacyIndexMetaData(parser)); - } - - public void testFromLegacyIndexMetaDataWithOldIncompatibleVersionSucceeds() throws IOException { - int numberOfShards = randomFrom(1, 2, 4, 8, 16); - XContentBuilder indexMetadataBuilder = buildLegacyIndexMetadata( - numberOfShards, - IndexVersion.getMinimumCompatibleIndexVersion(1_000_000) - ); - XContentParser parser = createParser(indexMetadataBuilder); - IndexMetadata.IndexShardCount count = IndexMetadata.IndexShardCount.fromLegacyIndexMetaData(parser); - assertEquals(numberOfShards, count.getCount()); - } - - private XContentBuilder buildLegacyIndexMetadata(int numberOfShards, IndexVersion compatibilityVersion) throws IOException { - return 
XContentFactory.jsonBuilder() - .startObject() - .field("my_index") - .startObject() - .startObject(KEY_SETTINGS) - .field(SETTING_NUMBER_OF_SHARDS, numberOfShards) - .field("index.version.compatibility", compatibilityVersion) - .endObject() - .endObject() - .endObject(); - } - - private Map randomCustomMap() { - Map customMap = new HashMap<>(); - customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); - customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); - return customMap; - } - - private IndexMetadata randomIndexMetadata(int numberOfShards) { - final boolean system = randomBoolean(); - Map customMap = randomCustomMap(); - return randomIndexMetadata(numberOfShards, system, customMap); - } - - private IndexMetadata randomIndexMetadata(boolean system, Map customMap) { - return randomIndexMetadata(randomFrom(1, 2, 4, 8, 16), system, customMap); - } - - private IndexMetadata randomIndexMetadata(int numberOfShards, boolean system, Map customMap) { - int numberOfReplicas = randomIntBetween(0, 10); - IndexVersion mappingsUpdatedVersion = IndexVersionUtils.randomVersion(); - IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numberOfShards) : null; - Double indexWriteLoadForecast = randomBoolean() ? randomDoubleBetween(0.0, 128, true) : null; - Long shardSizeInBytesForecast = randomBoolean() ? randomLongBetween(1024, 10240) : null; - Map inferenceFields = randomInferenceFields(); - IndexReshardingMetadata reshardingMetadata = randomBoolean() ? 
randomIndexReshardingMetadata(numberOfShards) : null; - - return IndexMetadata.builder("foo") - .settings(indexSettings(numberOfShards, numberOfReplicas).put("index.version.created", 1)) - .creationDate(randomLong()) - .primaryTerm(0, 2) - .setRoutingNumShards(32) - .system(system) - .putCustom("my_custom", customMap) - .putRolloverInfo( - new RolloverInfo( - randomAlphaOfLength(5), - List.of( - new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), - new MaxDocsCondition(randomNonNegativeLong()), - new MaxSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), - new MaxPrimaryShardSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), - new MaxPrimaryShardDocsCondition(randomNonNegativeLong()), - new OptimalShardCountCondition(3) - ), - randomNonNegativeLong() - ) - ) - .mappingsUpdatedVersion(mappingsUpdatedVersion) - .stats(indexStats) - .indexWriteLoadForecast(indexWriteLoadForecast) - .shardSizeInBytesForecast(shardSizeInBytesForecast) - .putInferenceFields(inferenceFields) - .eventIngestedRange( - randomFrom( - IndexLongFieldRange.UNKNOWN, - IndexLongFieldRange.EMPTY, - IndexLongFieldRange.NO_SHARDS, - IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(5000000, 5500000)) - ) - ) - .reshardingMetadata(reshardingMetadata) - .build(); - } - - private void removeEventIngestedField(Map indexMetadataMap) { - // convert XContent to a map and remove the IndexMetadata.KEY_EVENT_INGESTED_RANGE entry - // to simulate IndexMetadata from an older cluster version (before TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) - // Map indexMetadataMap = XContentHelper.convertToMap(BytesReference.bytes(builder), true, XContentType.JSON).v2(); - - @SuppressWarnings("unchecked") - Map inner = (Map) indexMetadataMap.get("foo"); - assertTrue(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); - inner.remove(IndexMetadata.KEY_EVENT_INGESTED_RANGE); - // validate that the IndexMetadata.KEY_EVENT_INGESTED_RANGE 
has been removed before calling fromXContent - assertFalse(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); - } - private IndexMetadata roundTripWithVersion(IndexMetadata indexMetadata, TransportVersion version) throws IOException { try (BytesStreamOutput out = new BytesStreamOutput()) { out.setTransportVersion(version); diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java new file mode 100644 index 0000000000000..38a4445a42add --- /dev/null +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java @@ -0,0 +1,265 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". 
+ */ + +package org.elasticsearch.repositories.blobstore; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardDocsCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardSizeCondition; +import org.elasticsearch.action.admin.indices.rollover.MaxSizeCondition; +import org.elasticsearch.action.admin.indices.rollover.OptimalShardCountCondition; +import org.elasticsearch.action.admin.indices.rollover.RolloverInfo; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexMetadataStats; +import org.elasticsearch.cluster.metadata.IndexReshardingMetadata; +import org.elasticsearch.cluster.metadata.IndexWriteLoad; +import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; +import org.elasticsearch.cluster.metadata.InferenceFieldMetadataTests; +import org.elasticsearch.cluster.routing.allocation.DataTier; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.XContentHelper; +import org.elasticsearch.core.TimeValue; +import org.elasticsearch.index.IndexVersion; +import org.elasticsearch.index.shard.IndexLongFieldRange; +import org.elasticsearch.index.shard.ShardLongFieldRange; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.index.IndexVersionUtils; +import org.elasticsearch.xcontent.XContentBuilder; +import org.elasticsearch.xcontent.XContentFactory; +import org.elasticsearch.xcontent.XContentParser; +import 
org.elasticsearch.xcontent.XContentType; +import org.elasticsearch.xcontent.json.JsonXContent; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static org.elasticsearch.cluster.metadata.IndexMetadata.KEY_SETTINGS; +import static org.elasticsearch.cluster.metadata.IndexMetadata.SETTING_NUMBER_OF_SHARDS; + +public class IndexShardCountTests extends ESTestCase { + public void testFromIndexMetaDataWithValidIndexMetaDataObject() throws IOException { + int numberOfShards = randomFrom(1, 2, 4, 8, 16); + IndexMetadata indexMetadata = randomIndexMetadata(numberOfShards); + + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + IndexMetadata.FORMAT.toXContent(builder, indexMetadata); + builder.endObject(); + XContentParser parser = createParser(builder); + + IndexShardCount count = IndexShardCount.fromIndexMetaData(parser); + assertEquals(numberOfShards, count.count()); + } + + public void testFromIndexMetaDataWithValidIndexMetaDataObjectWithoutEventIngestedField() throws IOException { + int numberOfShards = randomFrom(1, 2, 4, 8, 16); + IndexMetadata indexMetadata = randomIndexMetadata(numberOfShards); + + final XContentBuilder builder = JsonXContent.contentBuilder(); + builder.startObject(); + IndexMetadata.FORMAT.toXContent(builder, indexMetadata); + builder.endObject(); + XContentParser parser = createParser(builder); + + // convert XContent to a map and remove the IndexMetadata.KEY_EVENT_INGESTED_RANGE entry + // to simulate IndexMetadata from an older cluster version (before TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) + Map indexMetadataMap = XContentHelper.convertToMap(BytesReference.bytes(builder), true, XContentType.JSON).v2(); + removeEventIngestedField(indexMetadataMap); + + IndexShardCount count = IndexShardCount.fromIndexMetaData(parser); + assertEquals(numberOfShards, count.count()); + } + + public void 
testFromIndexMetaDataWithoutNumberOfShardsSettingReturnsNegativeOne() throws IOException { + XContentBuilder builder = XContentFactory.jsonBuilder() + .startObject() + .field("my_index") + .startObject() + .startObject(KEY_SETTINGS) + // no shard count + .endObject() + .endObject() + .endObject(); + XContentParser parser = createParser(builder); + + IndexShardCount count = IndexShardCount.fromIndexMetaData(parser); + assertEquals(-1, count.count()); + } + + public void testFromLegacyIndexMetaDataWithNewCompatibleVersionThrowsException() throws IOException { + int numberOfShards = randomFrom(1, 2, 4, 8, 16); + XContentBuilder indexMetadataBuilder = buildLegacyIndexMetadata(numberOfShards, IndexVersion.current()); + XContentParser parser = createParser(indexMetadataBuilder); + assertThrows(IllegalStateException.class, () -> IndexShardCount.fromLegacyIndexMetaData(parser)); + } + + public void testFromLegacyIndexMetaDataWithOldIncompatibleVersionSucceeds() throws IOException { + int numberOfShards = randomFrom(1, 2, 4, 8, 16); + XContentBuilder indexMetadataBuilder = buildLegacyIndexMetadata( + numberOfShards, + IndexVersion.getMinimumCompatibleIndexVersion(1_000_000) + ); + XContentParser parser = createParser(indexMetadataBuilder); + IndexShardCount count = IndexShardCount.fromLegacyIndexMetaData(parser); + assertEquals(numberOfShards, count.count()); + } + + private XContentBuilder buildLegacyIndexMetadata(int numberOfShards, IndexVersion compatibilityVersion) throws IOException { + return XContentFactory.jsonBuilder() + .startObject() + .field("my_index") + .startObject() + .startObject(KEY_SETTINGS) + .field(SETTING_NUMBER_OF_SHARDS, numberOfShards) + .field("index.version.compatibility", compatibilityVersion) + .endObject() + .endObject() + .endObject(); + } + + private Map randomCustomMap() { + Map customMap = new HashMap<>(); + customMap.put(randomAlphaOfLength(5), randomAlphaOfLength(10)); + customMap.put(randomAlphaOfLength(10), randomAlphaOfLength(15)); + 
return customMap; + } + + private IndexMetadata randomIndexMetadata(int numberOfShards) { + final boolean system = randomBoolean(); + Map customMap = randomCustomMap(); + return randomIndexMetadata(numberOfShards, system, customMap); + } + + private IndexMetadata randomIndexMetadata(boolean system, Map customMap) { + return randomIndexMetadata(randomFrom(1, 2, 4, 8, 16), system, customMap); + } + + private IndexMetadata randomIndexMetadata(int numberOfShards, boolean system, Map customMap) { + int numberOfReplicas = randomIntBetween(0, 10); + IndexVersion mappingsUpdatedVersion = IndexVersionUtils.randomVersion(); + IndexMetadataStats indexStats = randomBoolean() ? randomIndexStats(numberOfShards) : null; + Double indexWriteLoadForecast = randomBoolean() ? randomDoubleBetween(0.0, 128, true) : null; + Long shardSizeInBytesForecast = randomBoolean() ? randomLongBetween(1024, 10240) : null; + Map inferenceFields = randomInferenceFields(); + IndexReshardingMetadata reshardingMetadata = randomBoolean() ? 
randomIndexReshardingMetadata(numberOfShards) : null; + + return IndexMetadata.builder("foo") + .settings(indexSettings(numberOfShards, numberOfReplicas).put("index.version.created", 1)) + .creationDate(randomLong()) + .primaryTerm(0, 2) + .setRoutingNumShards(32) + .system(system) + .putCustom("my_custom", customMap) + .putRolloverInfo( + new RolloverInfo( + randomAlphaOfLength(5), + List.of( + new MaxAgeCondition(TimeValue.timeValueMillis(randomNonNegativeLong())), + new MaxDocsCondition(randomNonNegativeLong()), + new MaxSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), + new MaxPrimaryShardSizeCondition(ByteSizeValue.ofBytes(randomNonNegativeLong())), + new MaxPrimaryShardDocsCondition(randomNonNegativeLong()), + new OptimalShardCountCondition(3) + ), + randomNonNegativeLong() + ) + ) + .mappingsUpdatedVersion(mappingsUpdatedVersion) + .stats(indexStats) + .indexWriteLoadForecast(indexWriteLoadForecast) + .shardSizeInBytesForecast(shardSizeInBytesForecast) + .putInferenceFields(inferenceFields) + .eventIngestedRange( + randomFrom( + IndexLongFieldRange.UNKNOWN, + IndexLongFieldRange.EMPTY, + IndexLongFieldRange.NO_SHARDS, + IndexLongFieldRange.NO_SHARDS.extendWithShardRange(0, 1, ShardLongFieldRange.of(5000000, 5500000)) + ) + ) + .reshardingMetadata(reshardingMetadata) + .build(); + } + + private void removeEventIngestedField(Map indexMetadataMap) { + // convert XContent to a map and remove the IndexMetadata.KEY_EVENT_INGESTED_RANGE entry + // to simulate IndexMetadata from an older cluster version (before TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE) + // Map indexMetadataMap = XContentHelper.convertToMap(BytesReference.bytes(builder), true, XContentType.JSON).v2(); + + @SuppressWarnings("unchecked") + Map inner = (Map) indexMetadataMap.get("foo"); + assertTrue(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); + inner.remove(IndexMetadata.KEY_EVENT_INGESTED_RANGE); + // validate that the IndexMetadata.KEY_EVENT_INGESTED_RANGE 
has been removed before calling fromXContent + assertFalse(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); + } + + private IndexMetadata roundTripWithVersion(IndexMetadata indexMetadata, TransportVersion version) throws IOException { + try (BytesStreamOutput out = new BytesStreamOutput()) { + out.setTransportVersion(version); + indexMetadata.writeTo(out); + try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), writableRegistry())) { + in.setTransportVersion(version); + return IndexMetadata.readFrom(in); + } + } + } + + private static Settings indexSettingsWithDataTier(String dataTier) { + return indexSettings(IndexVersion.current(), 1, 0).put(DataTier.TIER_PREFERENCE, dataTier).build(); + } + + public static Map randomInferenceFields() { + Map map = new HashMap<>(); + int numFields = randomIntBetween(0, 5); + for (int i = 0; i < numFields; i++) { + String field = randomAlphaOfLengthBetween(5, 10); + map.put(field, randomInferenceFieldMetadata(field)); + } + return map; + } + + private static InferenceFieldMetadata randomInferenceFieldMetadata(String name) { + return new InferenceFieldMetadata( + name, + randomIdentifier(), + randomIdentifier(), + randomSet(1, 5, ESTestCase::randomIdentifier).toArray(String[]::new), + InferenceFieldMetadataTests.generateRandomChunkingSettings() + ); + } + + private IndexMetadataStats randomIndexStats(int numberOfShards) { + IndexWriteLoad.Builder indexWriteLoadBuilder = IndexWriteLoad.builder(numberOfShards); + int numberOfPopulatedWriteLoads = randomIntBetween(0, numberOfShards); + for (int i = 0; i < numberOfPopulatedWriteLoads; i++) { + indexWriteLoadBuilder.withShardWriteLoad( + i, + randomDoubleBetween(0.0, 128.0, true), + randomDoubleBetween(0.0, 128.0, true), + randomDoubleBetween(0.0, 128.0, true), + randomNonNegativeLong() + ); + } + return new IndexMetadataStats(indexWriteLoadBuilder.build(), randomLongBetween(100, 1024), randomIntBetween(1, 2)); + } + + private 
IndexReshardingMetadata randomIndexReshardingMetadata(int oldShards) { + return IndexReshardingMetadata.newSplitByMultiple(oldShards, randomIntBetween(2, 5)); + } +} From fa08eacd5b2e854275eb59750c07945fcc170300 Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Wed, 29 Oct 2025 15:26:53 +0000 Subject: [PATCH 06/13] Fix tests --- .../cluster/metadata/IndexMetadataTests.java | 5 +++-- .../blobstore/IndexShardCountTests.java | 14 ++++++-------- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index 39da67a81bec0..d0aae463dd193 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -10,7 +10,6 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardDocsCondition; @@ -67,6 +66,8 @@ public class IndexMetadataTests extends ESTestCase { + private static final TransportVersion ESQL_FAILURE_FROM_REMOTE = TransportVersion.fromName("esql_failure_from_remote"); + @Before public void setUp() throws Exception { super.setUp(); @@ -687,7 +688,7 @@ public void testReshardingBWCSerialization() throws IOException { IndexMetadata idx = IndexMetadata.builder("test").settings(settings).reshardingMetadata(reshardingMetadata).build(); // the version prior to TransportVersions.INDEX_RESHARDING_METADATA - final var version = TransportVersions.ESQL_FAILURE_FROM_REMOTE; + final var version = ESQL_FAILURE_FROM_REMOTE; // should round trip final var deserialized = roundTripWithVersion(idx, version); diff --git 
a/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java index 38a4445a42add..8d86258a170aa 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java @@ -101,21 +101,19 @@ public void testFromIndexMetaDataWithoutNumberOfShardsSettingReturnsNegativeOne( assertEquals(-1, count.count()); } - public void testFromLegacyIndexMetaDataWithNewCompatibleVersionThrowsException() throws IOException { - int numberOfShards = randomFrom(1, 2, 4, 8, 16); - XContentBuilder indexMetadataBuilder = buildLegacyIndexMetadata(numberOfShards, IndexVersion.current()); - XContentParser parser = createParser(indexMetadataBuilder); - assertThrows(IllegalStateException.class, () -> IndexShardCount.fromLegacyIndexMetaData(parser)); - } + /* + IndexMetadata specifies two parsing methods legacyFromXContent and fromXContent to be used depending + on the IndexVersion. 
Since we are only reading the shard count, we should succeed in either case + */ - public void testFromLegacyIndexMetaDataWithOldIncompatibleVersionSucceeds() throws IOException { + public void testFromIndexMetaDataWithOldVersionSucceeds() throws IOException { int numberOfShards = randomFrom(1, 2, 4, 8, 16); XContentBuilder indexMetadataBuilder = buildLegacyIndexMetadata( numberOfShards, IndexVersion.getMinimumCompatibleIndexVersion(1_000_000) ); XContentParser parser = createParser(indexMetadataBuilder); - IndexShardCount count = IndexShardCount.fromLegacyIndexMetaData(parser); + IndexShardCount count = IndexShardCount.fromIndexMetaData(parser); assertEquals(numberOfShards, count.count()); } From d699adf32d712c9edebf653993bd62aaa8aa231f Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Wed, 29 Oct 2025 15:27:41 +0000 Subject: [PATCH 07/13] Cleanup --- .../cluster/metadata/IndexMetadataTests.java | 2 +- .../blobstore/IndexShardCountTests.java | 25 ------------------- 2 files changed, 1 insertion(+), 26 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index d0aae463dd193..f3ef4fd193a1a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -267,7 +267,7 @@ public void testIndexMetadataFromXContentParsingWithoutEventIngestedField() thro IndexMetadata fromXContentMeta; XContentParserConfiguration config = XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry()) .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); - try (XContentParser xContentParser = XContentHelper.mapToXContentParser(config, indexMetadataMap);) { + try (XContentParser xContentParser = XContentHelper.mapToXContentParser(config, indexMetadataMap)) { fromXContentMeta = IndexMetadata.fromXContent(xContentParser); } diff 
--git a/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java index 8d86258a170aa..ee06340cff3eb 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java @@ -9,7 +9,6 @@ package org.elasticsearch.repositories.blobstore; -import org.elasticsearch.TransportVersion; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardDocsCondition; @@ -23,12 +22,7 @@ import org.elasticsearch.cluster.metadata.IndexWriteLoad; import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; import org.elasticsearch.cluster.metadata.InferenceFieldMetadataTests; -import org.elasticsearch.cluster.routing.allocation.DataTier; import org.elasticsearch.common.bytes.BytesReference; -import org.elasticsearch.common.io.stream.BytesStreamOutput; -import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; -import org.elasticsearch.common.io.stream.StreamInput; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; @@ -143,10 +137,6 @@ private IndexMetadata randomIndexMetadata(int numberOfShards) { return randomIndexMetadata(numberOfShards, system, customMap); } - private IndexMetadata randomIndexMetadata(boolean system, Map customMap) { - return randomIndexMetadata(randomFrom(1, 2, 4, 8, 16), system, customMap); - } - private IndexMetadata randomIndexMetadata(int numberOfShards, boolean system, Map customMap) { int numberOfReplicas = randomIntBetween(0, 10); IndexVersion mappingsUpdatedVersion = 
IndexVersionUtils.randomVersion(); @@ -207,21 +197,6 @@ private void removeEventIngestedField(Map indexMetadataMap) { assertFalse(inner.containsKey(IndexMetadata.KEY_EVENT_INGESTED_RANGE)); } - private IndexMetadata roundTripWithVersion(IndexMetadata indexMetadata, TransportVersion version) throws IOException { - try (BytesStreamOutput out = new BytesStreamOutput()) { - out.setTransportVersion(version); - indexMetadata.writeTo(out); - try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), writableRegistry())) { - in.setTransportVersion(version); - return IndexMetadata.readFrom(in); - } - } - } - - private static Settings indexSettingsWithDataTier(String dataTier) { - return indexSettings(IndexVersion.current(), 1, 0).put(DataTier.TIER_PREFERENCE, dataTier).build(); - } - public static Map randomInferenceFields() { Map map = new HashMap<>(); int numFields = randomIntBetween(0, 5); From a951977729b5595e003878b06f6e27e6a4997830 Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Wed, 29 Oct 2025 16:13:57 +0000 Subject: [PATCH 08/13] Minor tweaks --- .../repositories/blobstore/BlobStoreRepository.java | 2 +- .../repositories/blobstore/IndexShardCount.java | 6 ++++-- .../repositories/blobstore/IndexShardCountTests.java | 7 ++----- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 14ccec0692904..71418a453935a 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -399,7 +399,7 @@ public static String getRepositoryDataBlobName(long repositoryGeneration) { ); public static final ChecksumBlobStoreFormat INDEX_SHARD_COUNT_FORMAT = new ChecksumBlobStoreFormat<>( - "shard-count", + "index-metadata", METADATA_NAME_FORMAT, 
(repoName, parser) -> IndexShardCount.fromIndexMetaData(parser), (ignored) -> { diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java index f4795513c640c..5ade7b3ac55e3 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java @@ -54,11 +54,13 @@ public static IndexShardCount fromIndexMetaData(XContentParser parser) throws IO indexShardCount = new IndexShardCount(settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1)); } else { // Iterate through the object, but we don't care for it's contents - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {} + while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { + } } } else if (token == XContentParser.Token.START_ARRAY) { // Iterate through the array, but we don't care for it's contents - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {} + while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { + } } else if (token.isValue() == false) { throw new IllegalArgumentException("Unexpected token " + token); } diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java index ee06340cff3eb..9ec47089289c8 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java @@ -95,11 +95,8 @@ public void testFromIndexMetaDataWithoutNumberOfShardsSettingReturnsNegativeOne( assertEquals(-1, count.count()); } - /* - IndexMetadata specifies two parsing methods legacyFromXContent and fromXContent to be used depending - on the IndexVersion. 
Since we are only reading the shard count, we should succeed in either case - */ - + // IndexMetadata specifies two parsing methods legacyFromXContent and fromXContent to be used depending + // on the IndexVersion. Since we are only reading the shard count, we should succeed in either case public void testFromIndexMetaDataWithOldVersionSucceeds() throws IOException { int numberOfShards = randomFrom(1, 2, 4, 8, 16); XContentBuilder indexMetadataBuilder = buildLegacyIndexMetadata( From 8850b8bf7500126f506e460290b2f04d5ea2a6a3 Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Thu, 30 Oct 2025 15:28:09 +0000 Subject: [PATCH 09/13] Adds testSnapshotCreatedInOldVersionCanBeDeletedInNew test --- .../MultiVersionRepositoryAccessIT.java | 49 ++++++++++++++++++- 1 file changed, 47 insertions(+), 2 deletions(-) diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java index 1b2fc46a7ff9d..744967d80d6dc 100644 --- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -27,6 +27,7 @@ import java.io.IOException; import java.io.InputStream; import java.net.HttpURLConnection; +import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -227,6 +228,49 @@ public void testUpgradeMovesRepoToNewMetaVersion() throws IOException { } } + public void testSnapshotCreatedInOldVersionCanBeDeletedInNew() throws IOException { + final String repoName = getTestName(); + try { + final int shards = 3; + final String index = "test-index"; + createIndex(index, shards); + final IndexVersion minNodeVersion = minimumIndexVersion(); + // 7.12.0+ will try to load RepositoryData during repo creation if verify is true, which is 
impossible in case of version + // incompatibility in the downgrade test step. + final boolean verify = TEST_STEP != TestStep.STEP3_OLD_CLUSTER + || SnapshotsServiceUtils.includesUUIDs(minNodeVersion) + || minNodeVersion.before(IndexVersions.V_7_12_0); + createRepository(repoName, false, verify); + + // Create snapshots in the first step + if (TEST_STEP == TestStep.STEP1_OLD_CLUSTER) { + int numberOfSnapshots = randomIntBetween(5, 10); + for (int i = 0; i < numberOfSnapshots; i++) { + createSnapshot(repoName, "snapshot-" + i, index); + } + final List> snapshots = listSnapshots(repoName); + assertSnapshotStatusSuccessful(repoName, snapshots.stream().map(sn -> (String) sn.get("snapshot")).toArray(String[]::new)); + } else if (TEST_STEP == TestStep.STEP2_NEW_CLUSTER) { + final List> snapshots = listSnapshots(repoName); + List snapshotNames = new ArrayList<>(snapshots.stream().map(sn -> (String) sn.get("snapshot")).toList()); + + // Delete a single snapshot + deleteSnapshot(repoName, snapshotNames.removeFirst()); + + // Delete a bulk number of snapshots, avoiding the case where we delete all snapshots since this invokes + // cleanup code and bulk snapshot deletion logic which is tested in testUpgradeMovesRepoToNewMetaVersion + final List snapshotsToDeleteInBulk = randomSubsetOf(randomIntBetween(1, snapshotNames.size() - 1), snapshotNames); + deleteSnapshot(repoName, snapshotsToDeleteInBulk.toArray(String[]::new)); + snapshotNames.removeAll(snapshotsToDeleteInBulk); + + // Delete the rest of the snapshots (will invoke bulk snapshot deletion logic) + deleteSnapshot(repoName, snapshotNames.toArray(String[]::new)); + } + } finally { + deleteRepository(repoName); + } + } + private static void assertSnapshotStatusSuccessful(String repoName, String... 
snapshots) throws IOException { Request statusReq = new Request("GET", "/_snapshot/" + repoName + "/" + String.join(",", snapshots) + "/_status"); ObjectPath statusResp = ObjectPath.createFromResponse(client().performRequest(statusReq)); @@ -235,8 +279,9 @@ private static void assertSnapshotStatusSuccessful(String repoName, String... sn } } - private void deleteSnapshot(String repoName, String name) throws IOException { - assertAcknowledged(client().performRequest(new Request("DELETE", "/_snapshot/" + repoName + "/" + name))); + private void deleteSnapshot(String repoName, String... names) throws IOException { + String joinedNames = String.join(",", names); + assertAcknowledged(client().performRequest(new Request("DELETE", "/_snapshot/" + repoName + "/" + joinedNames))); } @SuppressWarnings("unchecked") From e213e01bb8b491903322c25d77044265d2991c64 Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Thu, 30 Oct 2025 15:30:53 +0000 Subject: [PATCH 10/13] Update changelog file --- docs/changelog/137210.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/docs/changelog/137210.yaml b/docs/changelog/137210.yaml index bdd27bdeb0e5b..070c6a003fb81 100644 --- a/docs/changelog/137210.yaml +++ b/docs/changelog/137210.yaml @@ -1,5 +1,6 @@ pr: 137210 -summary: "[WIP] Introduce INDEX_SHARD_COUNT_FORMAT" +summary: "Introduce INDEX_SHARD_COUNT_FORMAT" area: Snapshot/Restore type: bug -issues: [] +issues: + - 131822 From d21f336b10a4579aa6f39c12bd347995f5737ccd Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Thu, 30 Oct 2025 15:41:52 +0000 Subject: [PATCH 11/13] Comments --- .../cluster/metadata/IndexMetadata.java | 18 +++++++----------- .../blobstore/BlobStoreRepository.java | 12 +++++++----- .../blobstore/IndexShardCount.java | 6 ++---- .../cluster/metadata/IndexMetadataTests.java | 7 +++---- 4 files changed, 19 insertions(+), 24 deletions(-) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java 
b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java index cfc5b88ef58ef..5802f0e50294b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexMetadata.java @@ -2881,7 +2881,13 @@ public static IndexMetadata legacyFromXContent(XContentParser parser) throws IOE } else if (token == XContentParser.Token.START_OBJECT) { if ("settings".equals(currentFieldName)) { Settings settings = Settings.fromXContent(parser); - checkSettingIndexVersionCompatibility(settings); + if (SETTING_INDEX_VERSION_COMPATIBILITY.get(settings).isLegacyIndexVersion() == false) { + throw new IllegalStateException( + "this method should only be used to parse older incompatible index metadata versions " + + "but got " + + SETTING_INDEX_VERSION_COMPATIBILITY.get(settings).toReleaseVersion() + ); + } builder.settings(settings); } else if ("mappings".equals(currentFieldName)) { Map mappingSourceBuilder = new HashMap<>(); @@ -2974,16 +2980,6 @@ private static void handleLegacyMapping(Builder builder, Map map } } - private static void checkSettingIndexVersionCompatibility(Settings settings) { - if (SETTING_INDEX_VERSION_COMPATIBILITY.get(settings).isLegacyIndexVersion() == false) { - throw new IllegalStateException( - "this method should only be used to parse older incompatible index metadata versions " - + "but got " - + SETTING_INDEX_VERSION_COMPATIBILITY.get(settings).toReleaseVersion() - ); - } - } - /** * Return the {@link IndexVersion} of Elasticsearch that has been used to create an index given its settings. 
* diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 71418a453935a..d4399ce352c86 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -398,6 +398,9 @@ public static String getRepositoryDataBlobName(long repositoryGeneration) { Function.identity() ); + /** + * Parses only the shard count from the IndexMetadata object written by INDEX_METADATA_FORMAT (#131822) + */ public static final ChecksumBlobStoreFormat INDEX_SHARD_COUNT_FORMAT = new ChecksumBlobStoreFormat<>( "index-metadata", METADATA_NAME_FORMAT, @@ -1343,11 +1346,10 @@ private void getOneShardCount(String indexMetaGeneration) { logger.warn(() -> format("[%s] [%s] failed to read shard count for index", indexMetaGeneration, indexId.getName()), ex); // Definitely indicates something fairly badly wrong with the repo, but not immediately fatal here: we might get the // shard count from a subsequent indexMetaGeneration, or we might just not process these shards. If we skip these shards - // then the - // repository will technically enter an invalid state (these shards' index-XXX blobs will refer to snapshots that no - // longer exist) and may contain dangling blobs too. A subsequent delete that hits this index may repair the state if - // the metadata read error is transient, but if not then the stale indices cleanup will eventually remove this index - // and all its extra data anyway. + // then the repository will technically enter an invalid state (these shards' index-XXX blobs will refer to snapshots + // that no longer exist) and may contain dangling blobs too. 
A subsequent delete that hits this index may repair + // the state if the metadata read error is transient, but if not then the stale indices cleanup will eventually + // remove this index and all its extra data anyway. // TODO: Should we fail the delete here? See https://github.com/elastic/elasticsearch/issues/100569. } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java index 5ade7b3ac55e3..d701f8c691bfc 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java @@ -54,13 +54,11 @@ public static IndexShardCount fromIndexMetaData(XContentParser parser) throws IO indexShardCount = new IndexShardCount(settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1)); } else { // Iterate through the object, but we don't care for it's contents - while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) { - } + parser.skipChildren(); } } else if (token == XContentParser.Token.START_ARRAY) { // Iterate through the array, but we don't care for it's contents - while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) { - } + parser.skipChildren(); } else if (token.isValue() == false) { throw new IllegalArgumentException("Unexpected token " + token); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index f3ef4fd193a1a..39da67a81bec0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -10,6 +10,7 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; import 
org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardDocsCondition; @@ -66,8 +67,6 @@ public class IndexMetadataTests extends ESTestCase { - private static final TransportVersion ESQL_FAILURE_FROM_REMOTE = TransportVersion.fromName("esql_failure_from_remote"); - @Before public void setUp() throws Exception { super.setUp(); @@ -267,7 +266,7 @@ public void testIndexMetadataFromXContentParsingWithoutEventIngestedField() thro IndexMetadata fromXContentMeta; XContentParserConfiguration config = XContentParserConfiguration.EMPTY.withRegistry(xContentRegistry()) .withDeprecationHandler(LoggingDeprecationHandler.INSTANCE); - try (XContentParser xContentParser = XContentHelper.mapToXContentParser(config, indexMetadataMap)) { + try (XContentParser xContentParser = XContentHelper.mapToXContentParser(config, indexMetadataMap);) { fromXContentMeta = IndexMetadata.fromXContent(xContentParser); } @@ -688,7 +687,7 @@ public void testReshardingBWCSerialization() throws IOException { IndexMetadata idx = IndexMetadata.builder("test").settings(settings).reshardingMetadata(reshardingMetadata).build(); // the version prior to TransportVersions.INDEX_RESHARDING_METADATA - final var version = ESQL_FAILURE_FROM_REMOTE; + final var version = TransportVersions.ESQL_FAILURE_FROM_REMOTE; // should round trip final var deserialized = roundTripWithVersion(idx, version); From a9c22ad90cd24296b17ff0f1f308ffc2054229cf Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Thu, 30 Oct 2025 15:48:51 +0000 Subject: [PATCH 12/13] Revert IndexMetadataTests change that appeared after merging main --- .../elasticsearch/cluster/metadata/IndexMetadataTests.java | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java 
b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java index 39da67a81bec0..d0aae463dd193 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataTests.java @@ -10,7 +10,6 @@ package org.elasticsearch.cluster.metadata; import org.elasticsearch.TransportVersion; -import org.elasticsearch.TransportVersions; import org.elasticsearch.action.admin.indices.rollover.MaxAgeCondition; import org.elasticsearch.action.admin.indices.rollover.MaxDocsCondition; import org.elasticsearch.action.admin.indices.rollover.MaxPrimaryShardDocsCondition; @@ -67,6 +66,8 @@ public class IndexMetadataTests extends ESTestCase { + private static final TransportVersion ESQL_FAILURE_FROM_REMOTE = TransportVersion.fromName("esql_failure_from_remote"); + @Before public void setUp() throws Exception { super.setUp(); @@ -687,7 +688,7 @@ public void testReshardingBWCSerialization() throws IOException { IndexMetadata idx = IndexMetadata.builder("test").settings(settings).reshardingMetadata(reshardingMetadata).build(); // the version prior to TransportVersions.INDEX_RESHARDING_METADATA - final var version = TransportVersions.ESQL_FAILURE_FROM_REMOTE; + final var version = ESQL_FAILURE_FROM_REMOTE; // should round trip final var deserialized = roundTripWithVersion(idx, version); From 52dac2eb3006f1b9cce502a6dc4696daabf8cb15 Mon Sep 17 00:00:00 2001 From: Joshua Adams Date: Fri, 31 Oct 2025 12:57:19 +0000 Subject: [PATCH 13/13] Comments --- .../MultiVersionRepositoryAccessIT.java | 15 ++++++--- .../blobstore/BlobStoreRepository.java | 4 +-- .../blobstore/IndexShardCount.java | 32 ++++++++++--------- .../blobstore/IndexShardCountTests.java | 16 +++++++--- 4 files changed, 40 insertions(+), 27 deletions(-) diff --git a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java 
b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java index 744967d80d6dc..57d9240772506 100644 --- a/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java +++ b/qa/repository-multi-version/src/test/java/org/elasticsearch/upgrades/MultiVersionRepositoryAccessIT.java @@ -260,11 +260,11 @@ public void testSnapshotCreatedInOldVersionCanBeDeletedInNew() throws IOExceptio // Delete a bulk number of snapshots, avoiding the case where we delete all snapshots since this invokes // cleanup code and bulk snapshot deletion logic which is tested in testUpgradeMovesRepoToNewMetaVersion final List snapshotsToDeleteInBulk = randomSubsetOf(randomIntBetween(1, snapshotNames.size() - 1), snapshotNames); - deleteSnapshot(repoName, snapshotsToDeleteInBulk.toArray(String[]::new)); + deleteSnapshot(repoName, snapshotsToDeleteInBulk); snapshotNames.removeAll(snapshotsToDeleteInBulk); // Delete the rest of the snapshots (will invoke bulk snapshot deletion logic) - deleteSnapshot(repoName, snapshotNames.toArray(String[]::new)); + deleteSnapshot(repoName, snapshotNames); } } finally { deleteRepository(repoName); @@ -279,9 +279,14 @@ private static void assertSnapshotStatusSuccessful(String repoName, String... sn } } - private void deleteSnapshot(String repoName, String... 
names) throws IOException { - String joinedNames = String.join(",", names); - assertAcknowledged(client().performRequest(new Request("DELETE", "/_snapshot/" + repoName + "/" + joinedNames))); + private void deleteSnapshot(String repoName, List names) throws IOException { + assertAcknowledged( + client().performRequest(new Request("DELETE", "/_snapshot/" + repoName + "/" + Strings.collectionToCommaDelimitedString(names))) + ); + } + + private void deleteSnapshot(String repoName, String name) throws IOException { + assertAcknowledged(client().performRequest(new Request("DELETE", "/_snapshot/" + repoName + "/" + name))); } @SuppressWarnings("unchecked") diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index d4399ce352c86..3c2a749938eee 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -404,7 +404,7 @@ public static String getRepositoryDataBlobName(long repositoryGeneration) { public static final ChecksumBlobStoreFormat INDEX_SHARD_COUNT_FORMAT = new ChecksumBlobStoreFormat<>( "index-metadata", METADATA_NAME_FORMAT, - (repoName, parser) -> IndexShardCount.fromIndexMetaData(parser), + (repoName, parser) -> IndexShardCount.fromIndexMetadata(parser), (ignored) -> { assert false; throw new UnsupportedOperationException(); @@ -1345,7 +1345,7 @@ private void getOneShardCount(String indexMetaGeneration) { } catch (Exception ex) { logger.warn(() -> format("[%s] [%s] failed to read shard count for index", indexMetaGeneration, indexId.getName()), ex); // Definitely indicates something fairly badly wrong with the repo, but not immediately fatal here: we might get the - // shard count from a subsequent indexMetaGeneration, or we might just not process these shards. 
If we skip these shards + // shard count from another metadata blob, or we might just not process these shards. If we skip these shards // then the repository will technically enter an invalid state (these shards' index-XXX blobs will refer to snapshots // that no longer exist) and may contain dangling blobs too. A subsequent delete that hits this index may repair // the state if the metadata read error is transient, but if not then the stale indices cleanup will eventually diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java index d701f8c691bfc..5758fc4ebf259 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/IndexShardCount.java @@ -10,7 +10,6 @@ package org.elasticsearch.repositories.blobstore; import org.elasticsearch.cluster.metadata.IndexMetadata; -import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentParserUtils; import org.elasticsearch.xcontent.XContentParser; @@ -32,38 +31,41 @@ public record IndexShardCount(int count) { * @return Returns an {@link IndexShardCount} containing the shard count for the index * @throws IOException Thrown if the {@link IndexMetadata} object cannot be parsed correctly */ - public static IndexShardCount fromIndexMetaData(XContentParser parser) throws IOException { - if (parser.currentToken() == null) { // fresh parser? 
move to the first token - parser.nextToken(); - } - if (parser.currentToken() == XContentParser.Token.START_OBJECT) { // on a start object move to next token - parser.nextToken(); - } + public static IndexShardCount fromIndexMetadata(XContentParser parser) throws IOException { + parser.nextToken(); // fresh parser so move to the first token + parser.nextToken(); // on a start object move to next token XContentParserUtils.ensureExpectedToken(XContentParser.Token.FIELD_NAME, parser.currentToken(), parser); String currentFieldName; XContentParser.Token token = parser.nextToken(); XContentParserUtils.ensureExpectedToken(XContentParser.Token.START_OBJECT, token, parser); - IndexShardCount indexShardCount = new IndexShardCount(-1); - // Skip over everything except the settings object we care about, or any unexpected tokens + IndexShardCount indexShardCount = null; + // Skip over everything except the index.number_of_shards setting, or any unexpected tokens while ((currentFieldName = parser.nextFieldName()) != null) { token = parser.nextToken(); if (token == XContentParser.Token.START_OBJECT) { if (currentFieldName.equals(KEY_SETTINGS)) { - Settings settings = Settings.fromXContent(parser); - indexShardCount = new IndexShardCount(settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1)); + while (parser.nextToken() != XContentParser.Token.END_OBJECT) { + String fieldName = parser.currentName(); + parser.nextToken(); + if (SETTING_NUMBER_OF_SHARDS.equals(fieldName)) { + indexShardCount = new IndexShardCount(parser.intValue()); + } else { + parser.skipChildren(); + } + } } else { - // Iterate through the object, but we don't care for it's contents parser.skipChildren(); } } else if (token == XContentParser.Token.START_ARRAY) { - // Iterate through the array, but we don't care for it's contents parser.skipChildren(); } else if (token.isValue() == false) { throw new IllegalArgumentException("Unexpected token " + token); } } 
XContentParserUtils.ensureExpectedToken(XContentParser.Token.END_OBJECT, parser.nextToken(), parser); - return indexShardCount; + + // indexShardCount remains null if no index.number_of_shards setting was found while parsing (e.g. corrupt metadata) + return indexShardCount != null ? indexShardCount : new IndexShardCount(-1); + } } diff --git a/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java b/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java index 9ec47089289c8..7b2b5adb585f9 100644 --- a/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/blobstore/IndexShardCountTests.java @@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.InferenceFieldMetadata; import org.elasticsearch.cluster.metadata.InferenceFieldMetadataTests; import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.core.TimeValue; @@ -56,7 +57,7 @@ public void testFromIndexMetaDataWithValidIndexMetaDataObject() throws IOExcepti builder.endObject(); XContentParser parser = createParser(builder); - IndexShardCount count = IndexShardCount.fromIndexMetaData(parser); + IndexShardCount count = IndexShardCount.fromIndexMetadata(parser); assertEquals(numberOfShards, count.count()); } @@ -75,7 +76,7 @@ public void testFromIndexMetaDataWithValidIndexMetaDataObjectWithoutEventIngeste Map indexMetadataMap = XContentHelper.convertToMap(BytesReference.bytes(builder), true, XContentType.JSON).v2(); removeEventIngestedField(indexMetadataMap); - IndexShardCount count = IndexShardCount.fromIndexMetaData(parser); + IndexShardCount count = IndexShardCount.fromIndexMetadata(parser); assertEquals(numberOfShards, count.count()); } @@ -91,7 +92,7 @@ public void testFromIndexMetaDataWithoutNumberOfShardsSettingReturnsNegativeOne(
.endObject(); XContentParser parser = createParser(builder); - IndexShardCount count = IndexShardCount.fromIndexMetaData(parser); + IndexShardCount count = IndexShardCount.fromIndexMetadata(parser); assertEquals(-1, count.count()); } @@ -104,7 +105,7 @@ public void testFromIndexMetaDataWithOldVersionSucceeds() throws IOException { IndexVersion.getMinimumCompatibleIndexVersion(1_000_000) ); XContentParser parser = createParser(indexMetadataBuilder); - IndexShardCount count = IndexShardCount.fromIndexMetaData(parser); + IndexShardCount count = IndexShardCount.fromIndexMetadata(parser); assertEquals(numberOfShards, count.count()); } @@ -144,7 +145,7 @@ private IndexMetadata randomIndexMetadata(int numberOfShards, boolean system, Ma IndexReshardingMetadata reshardingMetadata = randomBoolean() ? randomIndexReshardingMetadata(numberOfShards) : null; return IndexMetadata.builder("foo") - .settings(indexSettings(numberOfShards, numberOfReplicas).put("index.version.created", 1)) + .settings(randomSettings(numberOfShards, numberOfReplicas)) .creationDate(randomLong()) .primaryTerm(0, 2) .setRoutingNumShards(32) @@ -181,6 +182,11 @@ private IndexMetadata randomIndexMetadata(int numberOfShards, boolean system, Ma .build(); } + private Settings.Builder randomSettings(int numberOfShards, int numberOfReplicas) { + return indexSettings(numberOfShards, numberOfReplicas).put("index.version.created", 1) + .putList("index.query.default_field", "title", "description", "tags"); + } + private void removeEventIngestedField(Map indexMetadataMap) { // convert XContent to a map and remove the IndexMetadata.KEY_EVENT_INGESTED_RANGE entry // to simulate IndexMetadata from an older cluster version (before TransportVersions.EVENT_INGESTED_RANGE_IN_CLUSTER_STATE)