diff --git a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
index eb528115bbc02..b80f74a1b81bb 100644
--- a/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
+++ b/server/src/main/java/org/opensearch/cluster/metadata/IndexMetadata.java
@@ -81,6 +81,7 @@
 import java.time.ZoneOffset;
 import java.time.ZonedDateTime;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
@@ -1080,9 +1081,7 @@ public Iterator<Setting<?>> settings() {
     static final String KEY_ALIASES = "aliases";
     static final String KEY_ROLLOVER_INFOS = "rollover_info";
     static final String KEY_SYSTEM = "system";
-    static final String KEY_SPLIT_SHARDS_METADATA = "split_shards_metadata";
     public static final String KEY_PRIMARY_TERMS = "primary_terms";
-    public static final String KEY_PRIMARY_TERMS_MAP = "primary_terms_map";
     public static final String REMOTE_STORE_CUSTOM_KEY = "remote_store";
     public static final String TRANSLOG_METADATA_KEY = "translog_metadata";
     public static final String REMOTE_STORE_SSE_ENABLED_INDEX_KEY = "sse_enabled_index";
@@ -1109,7 +1108,7 @@ public Iterator<Setting<?>> settings() {
 
     private final long aliasesVersion;
 
-    private final Map<Integer, Long> primaryTermsMap;
+    private final long[] primaryTerms;
 
     private final State state;
 
@@ -1148,14 +1147,13 @@ public Iterator<Setting<?>> settings() {
     private final Context context;
     private final IngestionStatus ingestionStatus;
-    private final SplitShardsMetadata splitShardsMetadata;
-
     private IndexMetadata(
         final Index index,
         final long version,
         final long mappingVersion,
         final long settingsVersion,
         final long aliasesVersion,
+        final long[] primaryTerms,
         final State state,
         final int numberOfShards,
         final int numberOfReplicas,
@@ -1182,9 +1180,7 @@ private IndexMetadata(
         final int indexTotalRemoteCapablePrimaryShardsPerNodeLimit,
         boolean isAppendOnlyIndex,
         final Context context,
-        final IngestionStatus ingestionStatus,
-        final SplitShardsMetadata splitShardsMetadata,
-        final Map<Integer, Long> primaryTermsMap
+        final IngestionStatus ingestionStatus
     ) {
         this.index = index;
@@ -1195,8 +1191,8 @@ private IndexMetadata(
         this.settingsVersion = settingsVersion;
         assert aliasesVersion >= 0 : aliasesVersion;
         this.aliasesVersion = aliasesVersion;
-        assert primaryTermsMap.size() == numberOfShards;
-        this.primaryTermsMap = Collections.unmodifiableMap(primaryTermsMap);
+        this.primaryTerms = primaryTerms;
+        assert primaryTerms.length == numberOfShards;
         this.state = state;
         this.numberOfShards = numberOfShards;
         this.numberOfReplicas = numberOfReplicas;
@@ -1237,7 +1233,6 @@ private IndexMetadata(
         }
         this.context = context;
         this.ingestionStatus = ingestionStatus;
-        this.splitShardsMetadata = splitShardsMetadata;
         assert numberOfShards * routingFactor == routingNumShards : routingNumShards + " must be a multiple of " + numberOfShards;
     }
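A note on the hunks above: the constructor now asserts `primaryTerms.length == numberOfShards`, and that invariant is what makes plain array indexing safe everywhere else in the class. A minimal, hypothetical sketch (not the OpenSearch class itself) of the pattern and its one visible behavior change:

```java
// Hypothetical stand-in for the array-backed primary terms storage.
final class PrimaryTerms {
    private final long[] terms;

    PrimaryTerms(long[] terms, int numberOfShards) {
        // Same invariant the IndexMetadata constructor asserts above.
        assert terms.length == numberOfShards;
        this.terms = terms;
    }

    long primaryTerm(int shardId) {
        // An out-of-range shardId now surfaces as ArrayIndexOutOfBoundsException
        // rather than the map-based IllegalArgumentException thrown before.
        return terms[shardId];
    }
}
```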
@@ -1285,11 +1280,7 @@ public long getAliasesVersion() {
      * that can be indexed into) is larger than 0. See {@link IndexMetadataUpdater#applyChanges}.
      **/
     public long primaryTerm(int shardId) {
-        Long pTerm = this.primaryTermsMap.get(shardId);
-        if (pTerm == null) {
-            throw new IllegalArgumentException("No primary term available for shard " + shardId);
-        }
-        return pTerm;
+        return this.primaryTerms[shardId];
     }
 
     /**
@@ -1472,10 +1463,6 @@ public Set<String> inSyncAllocationIds(int shardId) {
         return inSyncAllocationIds.get(shardId);
     }
 
-    public SplitShardsMetadata getSplitShardsMetadata() {
-        return splitShardsMetadata;
-    }
-
     public int getIndexTotalShardsPerNodeLimit() {
         return this.indexTotalShardsPerNodeLimit;
     }
@@ -1559,7 +1546,7 @@ public boolean equals(Object o) {
         if (routingFactor != that.routingFactor) {
             return false;
         }
-        if (!primaryTermsMap.equals(that.primaryTermsMap)) {
+        if (Arrays.equals(primaryTerms, that.primaryTerms) == false) {
             return false;
         }
         if (!inSyncAllocationIds.equals(that.inSyncAllocationIds)) {
@@ -1577,9 +1564,6 @@ public boolean equals(Object o) {
         if (Objects.equals(ingestionStatus, that.ingestionStatus) == false) {
             return false;
         }
-        if (!Objects.equals(splitShardsMetadata, that.splitShardsMetadata)) {
-            return false;
-        }
         return true;
     }
 
@@ -1594,13 +1578,12 @@ public int hashCode() {
         result = 31 * result + customData.hashCode();
         result = 31 * result + Long.hashCode(routingFactor);
         result = 31 * result + Long.hashCode(routingNumShards);
-        result = 31 * result + primaryTermsMap.hashCode();
+        result = 31 * result + Arrays.hashCode(primaryTerms);
         result = 31 * result + inSyncAllocationIds.hashCode();
         result = 31 * result + rolloverInfos.hashCode();
         result = 31 * result + Boolean.hashCode(isSystem);
         result = 31 * result + Objects.hashCode(context);
         result = 31 * result + Objects.hashCode(ingestionStatus);
-        result = 31 * result + Objects.hashCode(splitShardsMetadata);
         return result;
     }
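The equals/hashCode hunks switch to `Arrays.equals` and `Arrays.hashCode` because Java arrays inherit identity-based `equals`/`hashCode` from `Object`, so the map-style calls would silently compare references. A tiny self-contained demonstration:

```java
import java.util.Arrays;

// Why the hunks above use the Arrays helpers instead of equals()/hashCode().
public class ArrayEqualityDemo {
    public static void main(String[] args) {
        long[] a = { 1L, 2L };
        long[] b = { 1L, 2L };
        System.out.println(a.equals(b));                               // false: identity comparison
        System.out.println(Arrays.equals(a, b));                       // true: element-wise comparison
        System.out.println(Arrays.hashCode(a) == Arrays.hashCode(b));  // true: content-based hash
    }
}
```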
@@ -1636,6 +1619,7 @@ static class IndexMetadataDiff implements Diff<IndexMetadata> {
         private final long mappingVersion;
         private final long settingsVersion;
         private final long aliasesVersion;
+        private final long[] primaryTerms;
         private final State state;
         private final Settings settings;
         private final Diff<Map<String, MappingMetadata>> mappings;
@@ -1646,8 +1630,6 @@ static class IndexMetadataDiff implements Diff<IndexMetadata> {
         private final boolean isSystem;
         private final Context context;
         private final IngestionStatus ingestionStatus;
-        private final Diff<SplitShardsMetadata> splitMetadata;
-        private final Map<Integer, Long> primaryTermsMap;
 
         IndexMetadataDiff(IndexMetadata before, IndexMetadata after) {
             index = after.index.getName();
@@ -1658,6 +1640,7 @@ static class IndexMetadataDiff implements Diff<IndexMetadata> {
             routingNumShards = after.routingNumShards;
             state = after.state;
             settings = after.settings;
+            primaryTerms = after.primaryTerms;
             mappings = DiffableUtils.diff(before.mappings, after.mappings, DiffableUtils.getStringKeySerializer());
             aliases = DiffableUtils.diff(before.aliases, after.aliases, DiffableUtils.getStringKeySerializer());
             customData = DiffableUtils.diff(before.customData, after.customData, DiffableUtils.getStringKeySerializer());
@@ -1671,8 +1654,6 @@ static class IndexMetadataDiff implements Diff<IndexMetadata> {
             isSystem = after.isSystem;
             context = after.context;
             ingestionStatus = after.ingestionStatus;
-            splitMetadata = after.splitShardsMetadata.diff(before.splitShardsMetadata);
-            primaryTermsMap = after.primaryTermsMap;
         }
 
         private static final DiffableUtils.DiffableValueReader<String, AliasMetadata> ALIAS_METADATA_DIFF_VALUE_READER =
@@ -1693,10 +1674,7 @@ static class IndexMetadataDiff implements Diff<IndexMetadata> {
             aliasesVersion = in.readVLong();
             state = State.fromId(in.readByte());
             settings = Settings.readSettingsFromStream(in);
-            long[] primaryTerms = null;
-            if (in.getVersion().before(Version.V_3_6_0)) {
-                primaryTerms = in.readVLongArray();
-            }
+            primaryTerms = in.readVLongArray();
             mappings = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), MAPPING_DIFF_VALUE_READER);
             aliases = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), ALIAS_METADATA_DIFF_VALUE_READER);
             customData = DiffableUtils.readJdkMapDiff(in, DiffableUtils.getStringKeySerializer(), CUSTOM_DIFF_VALUE_READER);
@@ -1717,19 +1695,6 @@ static class IndexMetadataDiff implements Diff<IndexMetadata> {
             } else {
                 ingestionStatus = null;
             }
-
-            if (in.getVersion().onOrAfter(Version.V_3_6_0)) {
-                splitMetadata = SplitShardsMetadata.readDiffFrom(in);
-                primaryTermsMap = in.readMap(StreamInput::readInt, StreamInput::readLong);
-            } else {
-                assert primaryTerms != null;
-                splitMetadata = null;
-                Map<Integer, Long> primaryTermsMap = new HashMap<>();
-                for (int shardId = 0; shardId < primaryTerms.length; shardId++) {
-                    primaryTermsMap.put(shardId, primaryTerms[shardId]);
-                }
-                this.primaryTermsMap = primaryTermsMap;
-            }
         }
 
         @Override
@@ -1742,16 +1707,7 @@ public void writeTo(StreamOutput out) throws IOException {
             out.writeVLong(aliasesVersion);
             out.writeByte(state.id);
             Settings.writeSettingsToStream(settings, out);
-            if (out.getVersion().before(Version.V_3_6_0)) {
-                int numShards = INDEX_NUMBER_OF_SHARDS_SETTING.get(settings);
-                long[] primaryTermsArray = new long[numShards];
-                for (Map.Entry<Integer, Long> entry : primaryTermsMap.entrySet()) {
-                    if (entry.getKey() < numShards) {
-                        primaryTermsArray[entry.getKey()] = entry.getValue();
-                    }
-                }
-                out.writeVLongArray(primaryTermsArray);
-            }
+            out.writeVLongArray(primaryTerms);
             mappings.writeTo(out);
             aliases.writeTo(out);
             customData.writeTo(out);
@@ -1764,11 +1720,6 @@ public void writeTo(StreamOutput out) throws IOException {
             if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
                 out.writeOptionalWriteable(ingestionStatus);
             }
-
-            if (out.getVersion().onOrAfter(Version.V_3_6_0)) {
-                splitMetadata.writeTo(out);
-                out.writeMap(primaryTermsMap, StreamOutput::writeInt, StreamOutput::writeLong);
-            }
         }
 
         @Override
@@ -1781,7 +1732,7 @@ public IndexMetadata apply(IndexMetadata part) {
             builder.setRoutingNumShards(routingNumShards);
             builder.state(state);
             builder.settings(settings);
-            builder.primaryTermsMap(primaryTermsMap);
+            builder.primaryTerms(primaryTerms);
             builder.mappings.putAll(mappings.apply(part.mappings));
             builder.aliases.putAll(aliases.apply(part.aliases));
             builder.customMetadata.putAll(customData.apply(part.customData));
@@ -1790,9 +1741,6 @@ public IndexMetadata apply(IndexMetadata part) {
             builder.system(isSystem);
             builder.context(context);
             builder.ingestionStatus(ingestionStatus);
-            if (splitMetadata != null) {
-                builder.splitShardsMetadata(splitMetadata.apply(part.splitShardsMetadata));
-            }
             // TODO: support ingestion source
             return builder.build();
         }
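With the revert, the diff reads and writes the primary terms unconditionally as a VLong array instead of gating a legacy array and a map on `V_3_6_0`. A rough round-trip sketch of that wire shape, using the same stream helpers the hunks above call (assuming a plain `BytesStreamOutput`, so both sides use the current wire version):

```java
import org.opensearch.common.io.stream.BytesStreamOutput;
import org.opensearch.core.common.io.stream.StreamInput;

import java.util.Arrays;

// Round-trip sketch of the primary-terms wire format restored above.
public class PrimaryTermsWireDemo {
    public static void main(String[] args) throws Exception {
        long[] primaryTerms = { 1L, 5L, 3L };

        BytesStreamOutput out = new BytesStreamOutput();
        out.writeVLongArray(primaryTerms);           // length-prefixed variable-length longs

        try (StreamInput in = out.bytes().streamInput()) {
            long[] roundTripped = in.readVLongArray();
            assert Arrays.equals(primaryTerms, roundTripped);
        }
    }
}
```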
@@ -1807,10 +1755,7 @@ public static IndexMetadata readFrom(StreamInput in) throws IOException {
         builder.setRoutingNumShards(in.readInt());
         builder.state(State.fromId(in.readByte()));
         builder.settings(readSettingsFromStream(in));
-        long[] primaryTerms = null;
-        if (in.getVersion().before(Version.V_3_6_0)) {
-            primaryTerms = in.readVLongArray();
-        }
+        builder.primaryTerms(in.readVLongArray());
         int mappingsSize = in.readVInt();
         for (int i = 0; i < mappingsSize; i++) {
             MappingMetadata mappingMd = new MappingMetadata(in);
@@ -1846,35 +1791,9 @@ public static IndexMetadata readFrom(StreamInput in) throws IOException {
         if (in.getVersion().onOrAfter(Version.V_3_0_0)) {
             builder.ingestionStatus(in.readOptionalWriteable(IngestionStatus::new));
         }
-
-        if (in.getVersion().onOrAfter(Version.V_3_6_0)) {
-            builder.splitShardsMetadata(new SplitShardsMetadata(in));
-            builder.primaryTermsMap(in.readMap(StreamInput::readInt, StreamInput::readLong));
-        } else {
-            assert primaryTerms != null;
-            Map<Integer, Long> primaryTermsMapFromArray = new HashMap<>();
-            for (int shardId = 0; shardId < primaryTerms.length; shardId++) {
-                primaryTermsMapFromArray.put(shardId, primaryTerms[shardId]);
-            }
-            builder.primaryTermsMap(primaryTermsMapFromArray);
-        }
         return builder.build();
     }
 
-    /**
-     * Builds a primaryTerms long array from primaryTermsMap for backward-compatible wire serialization
-     * to pre-V_3_6_0 nodes.
-     */
-    private long[] buildPrimaryTermsArray() {
-        long[] terms = new long[numberOfShards];
-        for (Map.Entry<Integer, Long> entry : primaryTermsMap.entrySet()) {
-            if (entry.getKey() < numberOfShards) {
-                terms[entry.getKey()] = entry.getValue();
-            }
-        }
-        return terms;
-    }
-
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeString(index.getName()); // uuid will come as part of settings
@@ -1885,9 +1804,7 @@ public void writeTo(StreamOutput out) throws IOException {
         out.writeInt(routingNumShards);
         out.writeByte(state.id());
         writeSettingsToStream(settings, out);
-        if (out.getVersion().before(Version.V_3_6_0)) {
-            out.writeVLongArray(buildPrimaryTermsArray());
-        }
+        out.writeVLongArray(primaryTerms);
         out.writeVInt(mappings.size());
         for (final MappingMetadata cursor : mappings.values()) {
             cursor.writeTo(out);
@@ -1919,11 +1836,6 @@ public void writeTo(StreamOutput out) throws IOException {
         if (out.getVersion().onOrAfter(Version.V_3_0_0)) {
             out.writeOptionalWriteable(ingestionStatus);
         }
-
-        if (out.getVersion().onOrAfter(Version.V_3_6_0)) {
-            splitShardsMetadata.writeTo(out);
-            out.writeMap(primaryTermsMap, StreamOutput::writeInt, StreamOutput::writeLong);
-        }
     }
 
     @Override
@@ -1936,9 +1848,7 @@ public void writeVerifiableTo(BufferedChecksumStreamOutput out) throws IOException {
         out.writeInt(routingNumShards);
         out.writeByte(state.id());
         writeSettingsToStream(settings, out);
-        if (out.getVersion().before(Version.V_3_6_0)) {
-            out.writeVLongArray(buildPrimaryTermsArray());
-        }
+        out.writeVLongArray(primaryTerms);
         out.writeMapValues(mappings, (stream, val) -> val.writeVerifiableTo((BufferedChecksumStreamOutput) stream));
         out.writeMapValues(aliases, (stream, val) -> val.writeTo(stream));
         out.writeMap(customData, StreamOutput::writeString, (stream, val) -> val.writeTo(stream));
@@ -1952,11 +1862,6 @@ public void writeVerifiableTo(BufferedChecksumStreamOutput out) throws IOException {
         if (out.getVersion().onOrAfter(Version.V_2_17_0)) {
             out.writeOptionalWriteable(context);
         }
-
-        if (out.getVersion().onOrAfter(Version.V_3_6_0)) {
-            splitShardsMetadata.writeTo(out);
-            out.writeMap(primaryTermsMap, StreamOutput::writeInt, StreamOutput::writeLong);
-        }
     }
 
     @Override
@@ -1976,7 +1881,7 @@ public String toString() {
             .append(", aliasesVersion=")
             .append(aliasesVersion)
             .append(", primaryTerms=")
-            .append(primaryTermsMap)
+            .append(Arrays.toString(primaryTerms))
             .append(", aliases=")
             .append(aliases)
             .append(", settings=")
@@ -1995,8 +1900,6 @@ public String toString() {
             .append(context)
             .append(", ingestionStatus=")
             .append(ingestionStatus)
-            .append(", splitShardsMetadata=")
-            .append(splitShardsMetadata)
             .append("}")
             .toString();
     }
@@ -2035,7 +1938,7 @@ public static class Builder {
         private long mappingVersion = 1;
         private long settingsVersion = 1;
         private long aliasesVersion = 1;
-        private Map<Integer, Long> primaryTermsMap = new HashMap<>();
+        private long[] primaryTerms = null;
         private Settings settings = Settings.Builder.EMPTY_SETTINGS;
         private final Map<String, MappingMetadata> mappings;
         private final Map<String, AliasMetadata> aliases;
@@ -2046,7 +1949,6 @@ public static class Builder {
         private boolean isSystem;
         private Context context;
         private IngestionStatus ingestionStatus;
-        private SplitShardsMetadata splitShardsMetadata;
 
         public Builder(String index) {
             this.index = index;
@@ -2066,7 +1968,7 @@ public Builder(IndexMetadata indexMetadata) {
             this.settingsVersion = indexMetadata.settingsVersion;
             this.aliasesVersion = indexMetadata.aliasesVersion;
             this.settings = indexMetadata.getSettings();
-            this.primaryTermsMap = new HashMap<>(indexMetadata.primaryTermsMap);
+            this.primaryTerms = indexMetadata.primaryTerms.clone();
             this.mappings = new HashMap<>(indexMetadata.mappings);
             this.aliases = new HashMap<>(indexMetadata.aliases);
             this.customMetadata = new HashMap<>(indexMetadata.customData);
@@ -2076,7 +1978,6 @@ public Builder(IndexMetadata indexMetadata) {
             this.isSystem = indexMetadata.isSystem;
             this.context = indexMetadata.context;
             this.ingestionStatus = indexMetadata.ingestionStatus;
-            this.splitShardsMetadata = indexMetadata.splitShardsMetadata;
         }
 
         public Builder index(String index) {
@@ -2258,10 +2159,10 @@ public Builder aliasesVersion(final long aliasesVersion) {
          * See {@link IndexMetadata#primaryTerm(int)} for more information.
          */
        public long primaryTerm(int shardId) {
-            if (primaryTermsMap.isEmpty()) {
+            if (primaryTerms == null) {
                 initializePrimaryTerms();
             }
-            return primaryTermsMap.getOrDefault(shardId, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
+            return this.primaryTerms[shardId];
         }
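The Builder getter above lazily allocates the array on first access, once the shard count is known. A hedged standalone sketch of that pattern; the local `UNASSIGNED_PRIMARY_TERM` constant mirrors `SequenceNumbers.UNASSIGNED_PRIMARY_TERM` (0) from the real code:

```java
import java.util.Arrays;

// Hypothetical stand-in for the Builder's lazy primary-terms initialization.
final class LazyPrimaryTerms {
    private static final long UNASSIGNED_PRIMARY_TERM = 0L; // assumption: mirrors SequenceNumbers

    private final int numberOfShards;
    private long[] terms = null;

    LazyPrimaryTerms(int numberOfShards) {
        this.numberOfShards = numberOfShards;
    }

    long get(int shardId) {
        if (terms == null) {
            terms = new long[numberOfShards];
            Arrays.fill(terms, UNASSIGNED_PRIMARY_TERM); // explicit, though 0 is already the default
        }
        return terms[shardId];
    }
}
```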
@@ -2269,31 +2170,24 @@ public long primaryTerm(int shardId) {
          * See {@link IndexMetadata#primaryTerm(int)} for more information.
          */
         public Builder primaryTerm(int shardId, long primaryTerm) {
-            if (primaryTermsMap.isEmpty()) {
+            if (primaryTerms == null) {
                 initializePrimaryTerms();
             }
-            primaryTermsMap.put(shardId, primaryTerm);
+            this.primaryTerms[shardId] = primaryTerm;
             return this;
         }
 
         private void primaryTerms(long[] primaryTerms) {
-            for (int shard = 0; shard < primaryTerms.length; shard++) {
-                this.primaryTermsMap.put(shard, primaryTerms[shard]);
-            }
-        }
-
-        private void primaryTermsMap(Map<Integer, Long> primaryTermsMap) {
-            this.primaryTermsMap = new HashMap<>(primaryTermsMap);
+            this.primaryTerms = primaryTerms.clone();
         }
 
         private void initializePrimaryTerms() {
-            assert primaryTermsMap.isEmpty();
+            assert primaryTerms == null;
             if (numberOfShards() < 0) {
                 throw new IllegalStateException("you must set the number of shards before setting/reading primary terms");
             }
-            for (int i = 0; i < numberOfShards(); i++) {
-                this.primaryTermsMap.put(i, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
-            }
+            primaryTerms = new long[numberOfShards()];
+            Arrays.fill(primaryTerms, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
         }
 
         public Builder system(boolean system) {
@@ -2323,15 +2217,6 @@ public IngestionStatus getIngestionStatus() {
             return ingestionStatus;
         }
 
-        public Builder splitShardsMetadata(SplitShardsMetadata splitShardsMetadata) {
-            this.splitShardsMetadata = splitShardsMetadata;
-            return this;
-        }
-
-        public SplitShardsMetadata getSplitShardsMetadata() {
-            return splitShardsMetadata;
-        }
-
         public IndexMetadata build() {
             final Map<String, AliasMetadata> tmpAliases = aliases;
             Settings tmpSettings = settings;
@@ -2345,10 +2230,6 @@ public IndexMetadata build() {
             }
 
             final int numberOfShards = INDEX_NUMBER_OF_SHARDS_SETTING.get(settings);
-            if (splitShardsMetadata == null) {
-                splitShardsMetadata = new SplitShardsMetadata.Builder(numberOfShards).build();
-            }
-
             if (INDEX_NUMBER_OF_REPLICAS_SETTING.exists(settings) == false) {
                 throw new IllegalArgumentException("must specify number of replicas for index [" + index + "]");
             }
@@ -2435,14 +2316,12 @@ public IndexMetadata build() {
             Version indexCreatedVersion = indexCreated(settings);
             Version indexUpgradedVersion = settings.getAsVersion(IndexMetadata.SETTING_VERSION_UPGRADED, indexCreatedVersion);
 
-            if (primaryTermsMap.isEmpty()) {
-                for (int i = 0; i < numberOfShards; i++) {
-                    primaryTermsMap.put(i, SequenceNumbers.UNASSIGNED_PRIMARY_TERM);
-                }
-            } else if (primaryTermsMap.size() != numberOfShards) {
+            if (primaryTerms == null) {
+                initializePrimaryTerms();
+            } else if (primaryTerms.length != numberOfShards) {
                 throw new IllegalStateException(
                     "primaryTerms length is ["
-                        + primaryTermsMap.size()
+                        + primaryTerms.length
                         + "] but should be equal to number of shards ["
                         + numberOfShards()
                         + "]"
@@ -2485,6 +2364,7 @@ public IndexMetadata build() {
                 mappingVersion,
                 settingsVersion,
                 aliasesVersion,
+                primaryTerms,
                 state,
                 numberOfShards,
                 numberOfReplicas,
@@ -2511,9 +2391,7 @@ public IndexMetadata build() {
                 indexTotalRemoteCapablePrimaryShardsPerNodeLimit,
                 isAppendOnlyIndex,
                 context,
-                ingestionStatus,
-                splitShardsMetadata,
-                primaryTermsMap
+                ingestionStatus
             );
         }
@@ -2599,12 +2477,6 @@ public static void toXContent(IndexMetadata indexMetadata, XContentBuilder builder, Params params) throws IOException {
             builder.endObject();
         }
 
-        builder.startObject(KEY_PRIMARY_TERMS_MAP);
-        for (final Map.Entry<Integer, Long> cursor : indexMetadata.primaryTermsMap.entrySet()) {
-            builder.field(String.valueOf(cursor.getKey()), cursor.getValue());
-        }
-        builder.endObject();
-
         builder.startObject(KEY_IN_SYNC_ALLOCATIONS);
         for (final Map.Entry<Integer, Set<String>> cursor : indexMetadata.inSyncAllocationIds.entrySet()) {
             builder.startArray(String.valueOf(cursor.getKey()));
@@ -2634,12 +2506,6 @@ public static void toXContent(IndexMetadata indexMetadata, XContentBuilder builder, Params params) throws IOException {
             indexMetadata.ingestionStatus.toXContent(builder, params);
         }
 
-        if (indexMetadata.splitShardsMetadata != null) {
-            builder.startObject(KEY_SPLIT_SHARDS_METADATA);
-            indexMetadata.splitShardsMetadata.toXContent(builder, params);
-            builder.endObject();
-        }
-
         builder.endObject();
     }
 
@@ -2723,22 +2589,6 @@ public static IndexMetadata fromXContent(XContentParser parser) throws IOException {
                     builder.context(Context.fromXContent(parser));
                 } else if (INGESTION_STATUS_KEY.equals(currentFieldName)) {
                     builder.ingestionStatus(IngestionStatus.fromXContent(parser));
-                } else if (KEY_SPLIT_SHARDS_METADATA.equals(currentFieldName)) {
-                    builder.splitShardsMetadata(SplitShardsMetadata.parse(parser));
-                } else if (KEY_PRIMARY_TERMS_MAP.equals(currentFieldName)) {
-                    Map<Integer, Long> primaryTermsMap = new HashMap<>();
-                    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-                        if (token != XContentParser.Token.FIELD_NAME) {
-                            throw new IllegalArgumentException("Unexpected token: " + token);
-                        }
-                        Integer shard = Integer.parseInt(parser.currentName());
-                        token = parser.nextToken();
-                        if (token != XContentParser.Token.VALUE_NUMBER) {
-                            throw new IllegalArgumentException("Unexpected token: " + token);
-                        }
-                        primaryTermsMap.put(shard, parser.longValue());
-                    }
-                    builder.primaryTermsMap(primaryTermsMap);
                 } else {
                     // assume it's custom index metadata
                     builder.putCustom(currentFieldName, parser.mapStrings());
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/ShardRange.java b/server/src/main/java/org/opensearch/cluster/metadata/ShardRange.java
deleted file mode 100644
index fcaba4fb41db3..0000000000000
--- a/server/src/main/java/org/opensearch/cluster/metadata/ShardRange.java
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.cluster.metadata;
-
-import org.opensearch.common.annotation.ExperimentalApi;
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.common.io.stream.StreamOutput;
-import org.opensearch.core.common.io.stream.Writeable;
-import org.opensearch.core.xcontent.ToXContentFragment;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.core.xcontent.XContentParser;
-
-import java.io.IOException;
-
-/**
- * Represents the hash range assigned to a shard.
- *
- * @opensearch.experimental
- */
-@ExperimentalApi
-public record ShardRange(int shardId, int start, int end) implements Comparable<ShardRange>, ToXContentFragment, Writeable {
-
-    /**
-     * Constructs a new shard range from a stream.
-     *
-     * @param in the stream to read from
-     * @throws IOException if an error occurs while reading from the stream
-     * @see #writeTo(StreamOutput)
-     */
-    public ShardRange(StreamInput in) throws IOException {
-        this(in.readVInt(), in.readInt(), in.readInt());
-    }
-
-    public boolean contains(int hash) {
-        return hash >= start && hash <= end;
-    }
-
-    @Override
-    public int compareTo(ShardRange o) {
-        return Integer.compare(start, o.start);
-    }
-
-    @Override
-    public String toString() {
-        return "ShardRange{" + "shardId=" + shardId + ", start=" + start + ", end=" + end + '}';
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(shardId);
-        out.writeInt(start);
-        out.writeInt(end);
-    }
-
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.startObject().field("shard_id", shardId).field("start", start).field("end", end);
-        builder.endObject();
-        return builder;
-    }
-
-    public static ShardRange parse(XContentParser parser) throws IOException {
-        int shardId = -1, start = -1, end = -1;
-        XContentParser.Token token;
-        String fieldName = null;
-        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                fieldName = parser.currentName();
-            } else if (token == XContentParser.Token.VALUE_NUMBER) {
-                if ("shard_id".equals(fieldName)) {
-                    shardId = parser.intValue();
-                } else if ("start".equals(fieldName)) {
-                    start = parser.intValue();
-                } else if ("end".equals(fieldName)) {
-                    end = parser.intValue();
-                }
-            }
-        }
-
-        return new ShardRange(shardId, start, end);
-    }
-}
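For reference, the record deleted above treats both bounds as inclusive and orders ranges by `start`, which is what makes a sorted, non-overlapping array of them binary-searchable. A small usage sketch with a hypothetical `Range` stand-in:

```java
import java.util.Arrays;

// Hypothetical stand-in for the deleted ShardRange record.
record Range(int shardId, int start, int end) implements Comparable<Range> {
    boolean contains(int hash) {
        return hash >= start && hash <= end; // both bounds inclusive
    }

    @Override
    public int compareTo(Range o) {
        return Integer.compare(start, o.start); // order by range start
    }
}

class RangeDemo {
    public static void main(String[] args) {
        Range[] ranges = { new Range(2, 0, 99), new Range(1, -100, -1) };
        Arrays.sort(ranges);            // sorted by start: shard 1, then shard 2
        assert ranges[0].contains(-1);  // end bound is inclusive
        assert ranges[1].contains(0);   // start bound is inclusive
    }
}
```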
diff --git a/server/src/main/java/org/opensearch/cluster/metadata/SplitShardsMetadata.java b/server/src/main/java/org/opensearch/cluster/metadata/SplitShardsMetadata.java
deleted file mode 100644
index 96542685c007c..0000000000000
--- a/server/src/main/java/org/opensearch/cluster/metadata/SplitShardsMetadata.java
+++ /dev/null
@@ -1,676 +0,0 @@
-/*
- * SPDX-License-Identifier: Apache-2.0
- *
- * The OpenSearch Contributors require contributions made to
- * this file be licensed under the Apache-2.0 license or a
- * compatible open source license.
- */
-
-package org.opensearch.cluster.metadata;
-
-import org.opensearch.cluster.AbstractDiffable;
-import org.opensearch.cluster.Diff;
-import org.opensearch.common.annotation.ExperimentalApi;
-import org.opensearch.common.collect.Tuple;
-import org.opensearch.core.common.io.stream.StreamInput;
-import org.opensearch.core.common.io.stream.StreamOutput;
-import org.opensearch.core.xcontent.ToXContentFragment;
-import org.opensearch.core.xcontent.XContentBuilder;
-import org.opensearch.core.xcontent.XContentParser;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Set;
-import java.util.TreeSet;
-
-/**
- * Metadata for tracking shard split operations on an index.
- *
- * @opensearch.experimental
- */
-@ExperimentalApi
-public class SplitShardsMetadata extends AbstractDiffable<SplitShardsMetadata> implements ToXContentFragment {
-    private static final int MINIMUM_RANGE_LENGTH_THRESHOLD = 1000;
-
-    private static final String KEY_ROOT_SHARDS_TO_ALL_CHILDREN = "root_shards_to_all_children";
-    private static final String KEY_NUMBER_OF_ROOT_SHARDS = "num_of_root_shards";
-    private static final String KEY_PARENT_TO_CHILD_SHARDS = "parent_to_child_shards";
-    private static final String KEY_MAX_SHARD_ID = "max_shard_id";
-    private static final String KEY_IN_PROGRESS_SPLIT_SHARD_IDS = "in_progress_split_shard_id";
-    private static final String KEY_ACTIVE_SHARD_IDS = "active_shard_ids";
-
-    // Following fields are updated only after split completion and are used to service active shards requests.
-    // Root shard id to flat list of all child shards under root.
-    private final ShardRange[][] rootShardsToAllChildren;
-    private final int maxShardId;
-    private final Set<Integer> activeShardIds;
-
-    // Following fields can store temporary information about in-progress child shards along with info about
-    // split-completed shards.
-    // Mapping of a parent shard ID to children.
-    private final Map<Integer, ShardRange[]> parentToChildShards;
-    private final Set<Integer> inProgressSplitShardIds;
-
-    SplitShardsMetadata(
-        ShardRange[][] rootShardsToAllChildren,
-        Map<Integer, ShardRange[]> parentToChildShards,
-        Set<Integer> inProgressSplitShardIds,
-        Set<Integer> activeShardIds,
-        int maxShardId
-    ) {
-        this.rootShardsToAllChildren = rootShardsToAllChildren;
-        this.parentToChildShards = Collections.unmodifiableMap(parentToChildShards);
-        this.maxShardId = maxShardId;
-        this.inProgressSplitShardIds = Collections.unmodifiableSet(inProgressSplitShardIds);
-        this.activeShardIds = activeShardIds;
-    }
-
-    public SplitShardsMetadata(StreamInput in) throws IOException {
-        int numberOfRootShards = in.readVInt();
-        this.rootShardsToAllChildren = new ShardRange[numberOfRootShards][];
-        for (int i = 0; i < numberOfRootShards; i++) {
-            this.rootShardsToAllChildren[i] = in.readOptionalArray(ShardRange::new, ShardRange[]::new);
-        }
-        this.maxShardId = in.readVInt();
-        this.inProgressSplitShardIds = Collections.unmodifiableSet(in.readSet(StreamInput::readInt));
-        this.activeShardIds = Collections.unmodifiableSet(in.readSet(StreamInput::readInt));
-        this.parentToChildShards = Collections.unmodifiableMap(
-            in.readMap(StreamInput::readInt, i -> i.readArray(ShardRange::new, ShardRange[]::new))
-        );
-    }
-
-    public void writeTo(StreamOutput out) throws IOException {
-        out.writeVInt(rootShardsToAllChildren.length);
-        for (ShardRange[] rootShardsToAllChild : rootShardsToAllChildren) {
-            out.writeOptionalArray(rootShardsToAllChild);
-        }
-        out.writeVInt(this.maxShardId);
-        out.writeCollection(this.inProgressSplitShardIds, StreamOutput::writeInt);
-        out.writeCollection(this.activeShardIds, StreamOutput::writeInt);
-        out.writeMap(this.parentToChildShards, StreamOutput::writeInt, StreamOutput::writeArray);
-    }
-
-    public int getShardIdOfHash(int rootShardId, int hash, boolean includeInProgressChildren) {
-        // First check if we have child shards against this root shard.
-        if (rootShardsToAllChildren[rootShardId] == null) {
-            if (includeInProgressChildren && parentToChildShards.containsKey(rootShardId)) {
-                ShardRange shardRange = binarySearchShards(parentToChildShards.get(rootShardId), hash);
-                assert shardRange != null;
-                return shardRange.shardId();
-            }
-            return rootShardId;
-        }
-
-        ShardRange[] existingChildShards = rootShardsToAllChildren[rootShardId];
-        ShardRange shardRange = binarySearchShards(existingChildShards, hash);
-        assert shardRange != null;
-
-        if (includeInProgressChildren && parentToChildShards.containsKey(shardRange.shardId())) {
-            shardRange = binarySearchShards(parentToChildShards.get(shardRange.shardId()), hash);
-        }
-        assert shardRange != null;
-
-        return shardRange.shardId();
-    }
-
-    private ShardRange binarySearchShards(ShardRange[] childShards, int hash) {
-        int low = 0, high = childShards.length - 1;
-        while (low <= high) {
-            int mid = low + (high - low) / 2;
-            ShardRange midShard = childShards[mid];
-            if (midShard.contains(hash)) {
-                return midShard;
-            } else if (hash < midShard.start()) {
-                high = mid - 1;
-            } else {
-                low = mid + 1;
-            }
-        }
-        return null;
-    }
-
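A standalone sketch of the `binarySearchShards` logic deleted above: search a sorted array of disjoint inclusive ranges for the one containing a hash. Simplified here to `int[][]` rows of `{start, end}`:

```java
// Binary search over sorted, non-overlapping inclusive ranges.
public class RangeBinarySearch {
    static int find(int[][] ranges, int hash) {
        int low = 0, high = ranges.length - 1;
        while (low <= high) {
            int mid = low + (high - low) / 2;        // avoids (low + high) overflow
            int start = ranges[mid][0], end = ranges[mid][1];
            if (hash >= start && hash <= end) {
                return mid;                          // hash falls inside this range
            } else if (hash < start) {
                high = mid - 1;
            } else {
                low = mid + 1;
            }
        }
        return -1;                                   // only reachable if the ranges don't cover hash
    }

    public static void main(String[] args) {
        int[][] ranges = { { Integer.MIN_VALUE, -1 }, { 0, Integer.MAX_VALUE } };
        assert find(ranges, 42) == 1;
        assert find(ranges, -42) == 0;
    }
}
```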
-    @Override
-    public String toString() {
-        StringBuilder parentToChildMap = new StringBuilder();
-        for (Map.Entry<Integer, ShardRange[]> entry : parentToChildShards.entrySet()) {
-            parentToChildMap.append("[");
-            parentToChildMap.append(entry.getKey()).append("=").append(Arrays.toString(entry.getValue()));
-            parentToChildMap.append("]");
-        }
-        return "SplitShardsMetadata{"
-            + "rootShardsToAllChildren="
-            + Arrays.toString(rootShardsToAllChildren)
-            + ", maxShardId="
-            + maxShardId
-            + ", activeShardIds="
-            + activeShardIds
-            + ", parentToChildShards="
-            + parentToChildMap
-            + ", inProgressSplitShardIds="
-            + inProgressSplitShardIds
-            + '}';
-    }
-
-    public int getNumberOfRootShards() {
-        return rootShardsToAllChildren.length;
-    }
-
-    public int getNumberOfShards() {
-        return activeShardIds.size();
-    }
-
-    public List<Integer> getRootShards() {
-        List<Integer> rootShardList = new ArrayList<>();
-        for (int i = 0; i < rootShardsToAllChildren.length; i++) {
-            rootShardList.add(i);
-        }
-        return rootShardList;
-    }
-
-    public ShardRange[] getChildShardsOfParent(int shardId) {
-        if (parentToChildShards.containsKey(shardId) == false) {
-            return null;
-        }
-
-        ShardRange[] childShards = new ShardRange[parentToChildShards.get(shardId).length];
-        int childShardIdx = 0;
-        for (ShardRange childShard : parentToChildShards.get(shardId)) {
-            childShards[childShardIdx++] = childShard;
-        }
-        return childShards;
-    }
-
-    public Set<Integer> getChildShardIdsOfParent(int shardId) {
-        Set<Integer> childShardIds = new HashSet<>();
-        if (parentToChildShards.containsKey(shardId) == false) {
-            return childShardIds;
-        }
-
-        for (ShardRange childShard : parentToChildShards.get(shardId)) {
-            childShardIds.add(childShard.shardId());
-        }
-        return childShardIds;
-    }
-
-    public int inProgressChildShardsCount() {
-        int total = 0;
-        for (Integer parent : inProgressSplitShardIds) {
-            total += parentToChildShards.get(parent).length;
-        }
-        return total;
-    }
-
-    public Iterator<Integer> getActiveShardIterator() {
-        return new HashSet<>(activeShardIds).iterator();
-    }
-
-    // Visible for testing
-    static void validateShardRanges(int shardId, ShardRange[] shardRanges) {
-        Integer start = null;
-        int lowerBound = Integer.MIN_VALUE;
-        int upperBound = Integer.MAX_VALUE;
-        for (ShardRange shardRange : shardRanges) {
-            validateBounds(shardRange, start, lowerBound);
-            long rangeEnd = shardRange.end();
-            long rangeLength = rangeEnd - shardRange.start() + 1;
-            if (rangeLength < MINIMUM_RANGE_LENGTH_THRESHOLD) {
-                throw new IllegalArgumentException(
-                    "Shard range from "
-                        + shardRange.start()
-                        + " to "
-                        + shardRange.end()
-                        + " is below shard range threshold of "
-                        + MINIMUM_RANGE_LENGTH_THRESHOLD
-                );
-            }
-
-            start = shardRange.end();
-        }
-
-        if (start == null) {
-            throw new IllegalArgumentException("No shard range defined for child shards of shard " + shardId);
-        }
-
-        if (start != upperBound) {
-            throw new IllegalArgumentException(
-                "Shard range from " + (start + 1) + " to " + upperBound + " is missing from the list of shard ranges"
-            );
-        }
-    }
-
-    private static void validateBounds(ShardRange shardRange, Integer start, long parentStart) {
-        if (start == null) {
-            if (shardRange.start() != parentStart) {
-                throw new IllegalArgumentException(
-                    "Shard range from " + parentStart + " to " + (shardRange.start() - 1) + " is missing from the list of shard ranges"
-                );
-            }
-        } else if (shardRange.start() != start + 1) {
-            String errorMessage;
-            if (shardRange.start() < start + 1) {
-                errorMessage = "Shard range overlaps from " + shardRange.start() + " to " + start;
-            } else {
-                errorMessage = "Shard range from "
-                    + (start + 1)
-                    + " to "
-                    + (shardRange.start() - 1)
-                    + " is missing from the list of shard ranges";
-            }
-            throw new IllegalArgumentException(errorMessage);
-        }
-    }
-
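A compact sketch of the invariant `validateShardRanges` enforces above: sorted inclusive ranges must tile the parent interval exactly, with no gap and no overlap (the real code additionally enforces `MINIMUM_RANGE_LENGTH_THRESHOLD`):

```java
// Contiguity check over sorted inclusive ranges, simplified to int[][] {start, end}.
public class ContiguityCheck {
    static void validate(int[][] sorted, int lower, int upper) {
        long expectedStart = lower;                  // long: upper + 1 may overflow int
        for (int[] r : sorted) {
            if (r[0] > expectedStart) throw new IllegalArgumentException("gap before " + r[0]);
            if (r[0] < expectedStart) throw new IllegalArgumentException("overlap at " + r[0]);
            expectedStart = (long) r[1] + 1;         // next range must begin right after this one
        }
        if (expectedStart != (long) upper + 1) throw new IllegalArgumentException("tail gap");
    }

    public static void main(String[] args) {
        validate(new int[][] { { 0, 4 }, { 5, 9 } }, 0, 9);  // tiles [0, 9] exactly: passes
    }
}
```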
-    /**
-     * Builder for {@link SplitShardsMetadata}.
-     *
-     * @opensearch.experimental
-     */
-    public static class Builder {
-        private final ShardRange[][] rootShardsToAllChildren;
-        private final Map<Integer, ShardRange[]> parentToChildShards;
-        private int maxShardId;
-        private final Set<Integer> inProgressSplitShardIds;
-        private final Set<Integer> activeShardIds;
-
-        public Builder(int numberOfShards) {
-            maxShardId = numberOfShards - 1;
-            rootShardsToAllChildren = new ShardRange[numberOfShards][];
-            parentToChildShards = new HashMap<>();
-            inProgressSplitShardIds = new HashSet<>();
-            activeShardIds = new HashSet<>();
-            for (int i = 0; i < numberOfShards; i++) {
-                activeShardIds.add(i);
-            }
-        }
-
-        public Builder(SplitShardsMetadata splitShardsMetadata) {
-            this.maxShardId = splitShardsMetadata.maxShardId;
-
-            this.rootShardsToAllChildren = new ShardRange[splitShardsMetadata.rootShardsToAllChildren.length][];
-            Set<Integer> activeShardIds = new HashSet<>();
-            for (int i = 0; i < splitShardsMetadata.rootShardsToAllChildren.length; i++) {
-                if (splitShardsMetadata.rootShardsToAllChildren[i] != null) {
-                    this.rootShardsToAllChildren[i] = new ShardRange[splitShardsMetadata.rootShardsToAllChildren[i].length];
-                    int j = 0;
-                    for (ShardRange childShard : splitShardsMetadata.rootShardsToAllChildren[i]) {
-                        this.rootShardsToAllChildren[i][j++] = childShard;
-                        activeShardIds.add(childShard.shardId());
-                    }
-                } else {
-                    activeShardIds.add(i);
-                }
-            }
-
-            this.parentToChildShards = new HashMap<>();
-            for (Integer parentShardId : splitShardsMetadata.parentToChildShards.keySet()) {
-                // Getting a copy of child shards for this parent.
-                ShardRange[] childShards = splitShardsMetadata.getChildShardsOfParent(parentShardId);
-                this.parentToChildShards.put(parentShardId, childShards);
-            }
-
-            inProgressSplitShardIds = new HashSet<>(splitShardsMetadata.inProgressSplitShardIds);
-            this.activeShardIds = activeShardIds;
-        }
-
-        /**
-         * Create metadata of new child shards for the provided shard id.
-         * @param splitShardId Shard id to split
-         * @param numberOfChildren Number of child shards this shard is going to have.
-         */
-        public List<ShardRange> splitShard(int splitShardId, int numberOfChildren) {
-            if (inProgressSplitShardIds.contains(splitShardId) || parentToChildShards.containsKey(splitShardId)) {
-                throw new IllegalArgumentException("Split of shard [" + splitShardId + "] is already in progress or completed.");
-            }
-
-            Tuple<Integer, ShardRange> shardTuple = findRootAndShard(splitShardId, rootShardsToAllChildren);
-            if (shardTuple == null) {
-                throw new IllegalArgumentException("Invalid shard id provided for splitting");
-            }
-            ShardRange parentShard = shardTuple.v2();
-
-            long rangeSize = ((long) parentShard.end() - parentShard.start() + 1) / numberOfChildren;
-            if (rangeSize <= MINIMUM_RANGE_LENGTH_THRESHOLD) {
-                throw new IllegalArgumentException("Cannot split shard [" + splitShardId + "] further.");
-            }
-
-            Set<Integer> inProgressChildShardIds = getInProgressChildShardIds();
-            inProgressSplitShardIds.add(splitShardId);
-
-            List<Integer> allConsumedShardIds = new ArrayList<>();
-            for (int i = 0; i < rootShardsToAllChildren.length; i++) {
-                if (rootShardsToAllChildren[i] == null) {
-                    allConsumedShardIds.add(i);
-                }
-            }
-            for (Integer splitId : parentToChildShards.keySet()) {
-                allConsumedShardIds.add(splitId);
-                for (ShardRange shard : parentToChildShards.get(splitId)) {
-                    allConsumedShardIds.add(shard.shardId());
-                }
-            }
-
-            Set<Integer> shardIdHoles = findHoles(allConsumedShardIds);
-            List<ShardRange> newChildShardsList = new ArrayList<>();
-            long start = parentShard.start();
-
-            int nextChildShardId = maxShardId, childShardId;
-            for (int i = 0; i < numberOfChildren; ++i) {
-                if (shardIdHoles.isEmpty()) {
-                    nextChildShardId++;
-                    while (inProgressChildShardIds.contains(nextChildShardId)) {
-                        assert activeShardIds.contains(nextChildShardId) == false;
-                        nextChildShardId++;
-                    }
-                    childShardId = nextChildShardId;
-                } else {
-                    childShardId = shardIdHoles.iterator().next();
-                    shardIdHoles.remove(childShardId);
-                    nextChildShardId = Math.max(nextChildShardId, childShardId);
-                }
-                assert !inProgressChildShardIds.contains(childShardId);
-                assert !activeShardIds.contains(childShardId);
-                inProgressChildShardIds.add(childShardId);
-
-                long end = i == numberOfChildren - 1 ? parentShard.end() : start + rangeSize - 1;
-                ShardRange childShard = new ShardRange(childShardId, (int) start, (int) end);
-                newChildShardsList.add(childShard);
-                start = end + 1;
-            }
-
-            ShardRange[] newShardRanges = newChildShardsList.toArray(new ShardRange[0]);
-
-            // Get existing childShardRanges under rootShard
-            List<ShardRange> shardsUnderRoot = rootShardsToAllChildren[shardTuple.v1()] == null
-                ? new ArrayList<>()
-                : new ArrayList<>(Arrays.asList(rootShardsToAllChildren[shardTuple.v1()]));
-            shardsUnderRoot.remove(shardTuple.v2()); // Remove parent shard range
-            shardsUnderRoot.addAll(List.of(newShardRanges)); // Add child shard range
-            ShardRange[] newShardsUnderRoot = shardsUnderRoot.toArray(new ShardRange[0]);
-            Arrays.sort(newShardsUnderRoot);
-
-            validateShardRanges(splitShardId, newShardsUnderRoot);
-
-            parentToChildShards.put(splitShardId, newShardRanges);
-            return Collections.unmodifiableList(newChildShardsList);
-        }
-
-        private Set<Integer> findHoles(List<Integer> allConsumedShardIds) {
-            Set<Integer> gaps = new TreeSet<>();
-
-            if (allConsumedShardIds == null || allConsumedShardIds.size() <= 1) {
-                return gaps;
-            }
-
-            Collections.sort(allConsumedShardIds);
-            for (int i = 1; i < allConsumedShardIds.size(); i++) {
-                int current = allConsumedShardIds.get(i);
-                int previous = allConsumedShardIds.get(i - 1);
-
-                if (current - previous > 1) {
-                    for (int j = previous + 1; j < current; j++) {
-                        gaps.add(j);
-                    }
-                }
-            }
-
-            return gaps;
-        }
-
-        public void updateSplitMetadataForChildShards(int sourceShardId, Set<Integer> newChildShardIds) {
-            Tuple<Integer, ShardRange> shardRangeTuple = findRootAndShard(sourceShardId, rootShardsToAllChildren);
-
-            assert inProgressSplitShardIds.contains(sourceShardId);
-            assert newChildShardIds.size() == parentToChildShards.get(sourceShardId).length;
-            for (ShardRange childShard : parentToChildShards.get(sourceShardId)) {
-                assert newChildShardIds.contains(childShard.shardId());
-            }
-
-            List<ShardRange> shardsUnderRoot = rootShardsToAllChildren[shardRangeTuple.v1()] == null
-                ? new ArrayList<>()
-                : new ArrayList<>(Arrays.asList(rootShardsToAllChildren[shardRangeTuple.v1()]));
-            shardsUnderRoot.remove(shardRangeTuple.v2());
-            shardsUnderRoot.addAll(Arrays.asList(parentToChildShards.get(sourceShardId)));
-            ShardRange[] newShardsUnderRoot = shardsUnderRoot.toArray(new ShardRange[0]);
-            Arrays.sort(newShardsUnderRoot);
-            validateShardRanges(shardRangeTuple.v1(), newShardsUnderRoot);
-
-            int currentMaxShardId = maxShardId;
-            for (Integer newChildId : newChildShardIds) {
-                assert activeShardIds.contains(newChildId) == false;
-                activeShardIds.add(newChildId);
-                currentMaxShardId = Math.max(currentMaxShardId, newChildId);
-            }
-
-            activeShardIds.remove(sourceShardId);
-            maxShardId = currentMaxShardId;
-            rootShardsToAllChildren[shardRangeTuple.v1()] = newShardsUnderRoot;
-            inProgressSplitShardIds.remove(sourceShardId);
-        }
-
-        private Set<Integer> getInProgressChildShardIds() {
-            Set<Integer> inProgressChildShardIds = new HashSet<>();
-            for (Integer inProgressParent : inProgressSplitShardIds) {
-                assert parentToChildShards.containsKey(inProgressParent);
-                ShardRange[] childShards = parentToChildShards.get(inProgressParent);
-                for (ShardRange childShard : childShards) {
-                    inProgressChildShardIds.add(childShard.shardId());
-                }
-            }
-            return inProgressChildShardIds;
-        }
-
-        public void cancelSplit(int sourceShardId) {
-            assert inProgressSplitShardIds.contains(sourceShardId);
-            inProgressSplitShardIds.remove(sourceShardId);
-            parentToChildShards.remove(sourceShardId);
-        }
-
-        public SplitShardsMetadata build() {
-            return new SplitShardsMetadata(
-                this.rootShardsToAllChildren,
-                this.parentToChildShards,
-                this.inProgressSplitShardIds,
-                this.activeShardIds,
-                this.maxShardId
-            );
-        }
-    }
-
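A sketch of the `findHoles` helper deleted above: given the shard ids already consumed, collect the free ids between the smallest and largest used id so they can be recycled lowest-first (hence the `TreeSet`):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Set;
import java.util.TreeSet;

// Gap-finding over a list of used shard ids, mirroring the deleted helper.
public class FindHolesDemo {
    static Set<Integer> findHoles(List<Integer> used) {
        Set<Integer> gaps = new TreeSet<>();        // ordered, so lowest free id comes out first
        if (used == null || used.size() <= 1) {
            return gaps;
        }
        Collections.sort(used);
        for (int i = 1; i < used.size(); i++) {
            for (int j = used.get(i - 1) + 1; j < used.get(i); j++) {
                gaps.add(j);                        // every id strictly between two consecutive used ids
            }
        }
        return gaps;
    }

    public static void main(String[] args) {
        System.out.println(findHoles(new ArrayList<>(List.of(0, 1, 4, 7)))); // [2, 3, 5, 6]
    }
}
```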
-    private static Tuple<Integer, ShardRange> findRootAndShard(int shardId, ShardRange[][] rootShardsToAllChildren) {
-        ShardRange[] allChildren;
-        for (int rootShardId = 0; rootShardId < rootShardsToAllChildren.length; rootShardId++) {
-            allChildren = rootShardsToAllChildren[rootShardId];
-            if (allChildren != null) {
-                for (ShardRange shardUnderRoot : allChildren) {
-                    if (shardUnderRoot.shardId() == shardId) {
-                        return new Tuple<>(rootShardId, shardUnderRoot);
-                    }
-                }
-            }
-        }
-
-        if (shardId < rootShardsToAllChildren.length && rootShardsToAllChildren[shardId] == null) {
-            // We are splitting a root shard in this case.
-            return new Tuple<>(shardId, new ShardRange(shardId, Integer.MIN_VALUE, Integer.MAX_VALUE));
-        }
-
-        throw new IllegalArgumentException("Shard ID doesn't exist in the current list of shard ranges");
-    }
-
-    public Set<Integer> getInProgressSplitShardIds() {
-        return inProgressSplitShardIds;
-    }
-
-    public boolean isSplitOfShardInProgress(int shardId) {
-        return inProgressSplitShardIds.contains(shardId);
-    }
-
-    public boolean isSplitParent(int shardId) {
-        return activeShardIds.contains(shardId) == false && parentToChildShards.containsKey(shardId);
-    }
-
-    public boolean isRecoveringChild(int shardId, int parentShardId) {
-        if (!inProgressSplitShardIds.contains(parentShardId)) {
-            return false;
-        }
-
-        for (ShardRange childShard : parentToChildShards.get(parentShardId)) {
-            if (childShard.shardId() == shardId) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-    @Override
-    public boolean equals(Object o) {
-        if (this == o) return true;
-        if (!(o instanceof SplitShardsMetadata)) return false;
-
-        SplitShardsMetadata that = (SplitShardsMetadata) o;
-
-        if (maxShardId != that.maxShardId) return false;
-        if (!inProgressSplitShardIds.equals(that.inProgressSplitShardIds)) return false;
-        if (!Arrays.deepEquals(rootShardsToAllChildren, that.rootShardsToAllChildren)) return false;
-        if (!activeShardIds.equals(that.activeShardIds)) return false;
-        if (parentToChildShards.size() != that.parentToChildShards.size()) return false;
-        for (Integer key : parentToChildShards.keySet()) {
-            if (!Arrays.deepEquals(parentToChildShards.get(key), that.parentToChildShards.get(key))) {
-                return false;
-            }
-        }
-        return true;
-    }
-
-    @Override
-    public int hashCode() {
-        int result = Arrays.deepHashCode(rootShardsToAllChildren);
-        for (Map.Entry<Integer, ShardRange[]> entry : parentToChildShards.entrySet()) {
-            result = 31 * result + Objects.hash(entry.getKey(), Arrays.deepHashCode(entry.getValue()));
-        }
-        result = 31 * result + maxShardId;
-        result = 31 * result + inProgressSplitShardIds.hashCode();
-        result = 31 * result + activeShardIds.hashCode();
-        return result;
-    }
-
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        builder.field(KEY_NUMBER_OF_ROOT_SHARDS, rootShardsToAllChildren.length);
-        builder.field(KEY_MAX_SHARD_ID, maxShardId);
-        if (!inProgressSplitShardIds.isEmpty()) {
-            builder.field(KEY_IN_PROGRESS_SPLIT_SHARD_IDS, new ArrayList<>(inProgressSplitShardIds));
-        }
-        builder.field(KEY_ACTIVE_SHARD_IDS, new ArrayList<>(activeShardIds));
-        builder.startObject(KEY_ROOT_SHARDS_TO_ALL_CHILDREN);
-        for (int rootShardId = 0; rootShardId < rootShardsToAllChildren.length; rootShardId++) {
-            ShardRange[] childShards = rootShardsToAllChildren[rootShardId];
-            if (childShards != null) {
-                builder.startArray(String.valueOf(rootShardId));
-                for (ShardRange childShard : childShards) {
-                    childShard.toXContent(builder, params);
-                }
-                builder.endArray();
-            }
-        }
-        builder.endObject();
-
-        builder.startObject(KEY_PARENT_TO_CHILD_SHARDS);
-        for (Integer parentShardId : parentToChildShards.keySet()) {
-            builder.startArray(String.valueOf(parentShardId));
-            for (ShardRange childShard : parentToChildShards.get(parentShardId)) {
-                childShard.toXContent(builder, params);
-            }
-            builder.endArray();
-        }
-        builder.endObject();
-
-        return builder;
-    }
-
-    public static SplitShardsMetadata parse(XContentParser parser) throws IOException {
-        XContentParser.Token token;
-        String currentFieldName = null;
-        int maxShardId = -1;
-        Set<Integer> inProgressSplitShardIds = new HashSet<>();
-        Set<Integer> activeShardIds = new HashSet<>();
-        ShardRange[][] rootShardsToAllChildren = null;
-        Map<Integer, ShardRange[]> tempShardIdToChildShards = new HashMap<>();
-        int numberOfRootShards = -1;
-        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                currentFieldName = parser.currentName();
-            } else if (token == XContentParser.Token.VALUE_NUMBER) {
-                if (KEY_MAX_SHARD_ID.equals(currentFieldName)) {
-                    maxShardId = parser.intValue();
-                } else if (KEY_NUMBER_OF_ROOT_SHARDS.equals(currentFieldName)) {
-                    numberOfRootShards = parser.intValue();
-                }
-            } else if (token == XContentParser.Token.START_OBJECT) {
-                if (KEY_ROOT_SHARDS_TO_ALL_CHILDREN.equals(currentFieldName)) {
-                    Map<Integer, ShardRange[]> rootShards = parseShardsMap(parser);
-                    rootShardsToAllChildren = new ShardRange[numberOfRootShards][];
-                    for (Map.Entry<Integer, ShardRange[]> entry : rootShards.entrySet()) {
-                        rootShardsToAllChildren[entry.getKey()] = entry.getValue();
-                    }
-                } else if (KEY_PARENT_TO_CHILD_SHARDS.equals(currentFieldName)) {
-                    tempShardIdToChildShards = parseShardsMap(parser);
-                }
-            } else if (token == XContentParser.Token.START_ARRAY) {
-                if (KEY_IN_PROGRESS_SPLIT_SHARD_IDS.equals(currentFieldName)) {
-                    while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
-                        inProgressSplitShardIds.add(parser.intValue());
-                    }
-                } else if (KEY_ACTIVE_SHARD_IDS.equals(currentFieldName)) {
-                    while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
-                        activeShardIds.add(parser.intValue());
-                    }
-                }
-            }
-        }
-
-        return new SplitShardsMetadata(
-            rootShardsToAllChildren,
-            tempShardIdToChildShards,
-            inProgressSplitShardIds,
-            activeShardIds,
-            maxShardId
-        );
-    }
-
-    private static Map<Integer, ShardRange[]> parseShardsMap(XContentParser parser) throws IOException {
-        XContentParser.Token token;
-        String currentFieldName = null;
-        Map<Integer, ShardRange[]> shardsMap = new HashMap<>();
-        while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
-            if (token == XContentParser.Token.FIELD_NAME) {
-                currentFieldName = parser.currentName();
-            } else if (token == XContentParser.Token.START_ARRAY) {
-                List<ShardRange> childShardRanges = new ArrayList<>();
-                while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
-                    ShardRange shardRange = ShardRange.parse(parser);
-                    childShardRanges.add(shardRange);
-                }
-                assert currentFieldName != null;
-                Integer parentShard = Integer.parseInt(currentFieldName);
-                shardsMap.put(parentShard, childShardRanges.toArray(new ShardRange[0]));
-            }
-        }
-
-        return shardsMap;
-    }
-
-    public static Diff<SplitShardsMetadata> readDiffFrom(StreamInput in) throws IOException {
-        return readDiffFrom(SplitShardsMetadata::new, in);
-    }
-
-}
diff --git a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java
index 210e9828876df..a2cbf191572b6 100644
--- a/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java
+++ b/server/src/main/java/org/opensearch/cluster/routing/OperationRouting.java
@@ -499,26 +499,12 @@ protected IndexShardRoutingTable shards(ClusterState clusterState, String index,
         return clusterState.getRoutingTable().shardRoutingTable(index, shardId);
     }
 
-    public ShardId shardWithRecoveringChild(ClusterState clusterState, String index, String id, String routing, Index shardIndex) {
-        int shardId = generateShardId(indexMetadata(clusterState, index), id, routing, true);
-        return new ShardId(shardIndex, shardId);
-    }
-
     public ShardId shardId(ClusterState clusterState, String index, String id, @Nullable String routing) {
         IndexMetadata indexMetadata = indexMetadata(clusterState, index);
         return new ShardId(indexMetadata.getIndex(), generateShardId(indexMetadata, id, routing));
     }
 
     public static int generateShardId(IndexMetadata indexMetadata, @Nullable String id, @Nullable String routing) {
-        return generateShardId(indexMetadata, id, routing, false);
-    }
-
-    public static int generateShardId(
-        IndexMetadata indexMetadata,
-        @Nullable String id,
-        @Nullable String routing,
-        boolean includeInProgressChild
-    ) {
         final String effectiveRouting;
         final int partitionOffset;
 
@@ -543,26 +529,15 @@ public static int generateShardId(
             return VirtualShardRoutingHelper.resolvePhysicalShardId(indexMetadata, vShardId);
         }
 
-        return calculateShardIdOfChild(indexMetadata, effectiveRouting, partitionOffset, includeInProgressChild);
+        return calculateScaledShardId(indexMetadata, effectiveRouting, partitionOffset);
     }
 
     private static int calculateScaledShardId(IndexMetadata indexMetadata, String effectiveRouting, int partitionOffset) {
-        return calculateShardIdOfChild(indexMetadata, effectiveRouting, partitionOffset, false);
-    }
-
-    private static int calculateShardIdOfChild(
-        IndexMetadata indexMetadata,
-        String effectiveRouting,
-        int partitionOffset,
-        boolean includeInProgressChild
-    ) {
         final int hash = Murmur3HashFunction.hash(effectiveRouting) + partitionOffset;
 
         // we don't use IMD#getNumberOfShards since the index might have been shrunk such that we need to use the size
         // of original index to hash documents
-        int rootShardId = Math.floorMod(hash, indexMetadata.getRoutingNumShards()) / indexMetadata.getRoutingFactor();
-
-        return indexMetadata.getSplitShardsMetadata().getShardIdOfHash(rootShardId, hash, includeInProgressChild);
+        return Math.floorMod(hash, indexMetadata.getRoutingNumShards()) / indexMetadata.getRoutingFactor();
     }
 
     private void checkPreferenceBasedRoutingAllowed(Preference preference, @Nullable WeightedRoutingMetadata weightedRoutingMetadata) {
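With the split-routing indirection removed, `calculateScaledShardId` is again pure arithmetic over the murmur3 hash of the effective routing value. A back-of-the-envelope sketch with illustrative constants (an index with `routingNumShards = 4` and 2 shards has `routingFactor = 4 / 2 = 2`; the real code feeds in `Murmur3HashFunction.hash(effectiveRouting)`):

```java
// Illustrative restatement of the restored routing arithmetic.
public class ScaledShardIdDemo {
    static int scaledShardId(int hash, int routingNumShards, int routingFactor) {
        // floorMod keeps the partition non-negative for negative hashes,
        // then the division collapses routing partitions onto real shards.
        return Math.floorMod(hash, routingNumShards) / routingFactor;
    }

    public static void main(String[] args) {
        int routingNumShards = 4, routingFactor = 2;
        assert scaledShardId(-7, routingNumShards, routingFactor) == 0; // floorMod(-7, 4) = 1, 1 / 2 = 0
        assert scaledShardId(3, routingNumShards, routingFactor) == 1;  // 3 / 2 = 1
    }
}
```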
0\n" + " },\n" - + " \"primary_terms_map\" : {\n" - + " \"0\" : 0\n" - + " },\n" + " \"in_sync_allocations\" : {\n" + " \"0\" : [ ]\n" + " },\n" @@ -273,15 +258,6 @@ public void testToXContent() throws IOException { + " \"system\" : false,\n" + " \"ingestion_status\" : {\n" + " \"is_paused\" : false\n" - + " },\n" - + " \"split_shards_metadata\" : {\n" - + " \"num_of_root_shards\" : 1,\n" - + " \"max_shard_id\" : 0,\n" - + " \"active_shard_ids\" : [\n" - + " 0\n" - + " ],\n" - + " \"root_shards_to_all_children\" : { },\n" - + " \"parent_to_child_shards\" : { }\n" + " }\n" + " }\n" + " },\n" diff --git a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java index 6657cee2e26dc..36d8898ae81d1 100644 --- a/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java +++ b/server/src/test/java/org/opensearch/cluster/ClusterStateTests.java @@ -264,9 +264,6 @@ public void testToXContent() throws IOException { + " \"primary_terms\" : {\n" + " \"0\" : 1\n" + " },\n" - + " \"primary_terms_map\" : {\n" - + " \"0\" : 1\n" - + " },\n" + " \"in_sync_allocations\" : {\n" + " \"0\" : [\n" + " \"allocationId\"\n" @@ -281,15 +278,6 @@ public void testToXContent() throws IOException { + " \"system\" : false,\n" + " \"ingestion_status\" : {\n" + " \"is_paused\" : false\n" - + " },\n" - + " \"split_shards_metadata\" : {\n" - + " \"num_of_root_shards\" : 1,\n" - + " \"max_shard_id\" : 0,\n" - + " \"active_shard_ids\" : [\n" - + " 0\n" - + " ],\n" - + " \"root_shards_to_all_children\" : { },\n" - + " \"parent_to_child_shards\" : { }\n" + " }\n" + " }\n" + " },\n" @@ -481,9 +469,6 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti + " \"primary_terms\" : {\n" + " \"0\" : 1\n" + " },\n" - + " \"primary_terms_map\" : {\n" - + " \"0\" : 1\n" - + " },\n" + " \"in_sync_allocations\" : {\n" + " \"0\" : [\n" + " \"allocationId\"\n" @@ -498,15 +483,6 @@ public void testToXContent_FlatSettingTrue_ReduceMappingFalse() throws IOExcepti + " \"system\" : false,\n" + " \"ingestion_status\" : {\n" + " \"is_paused\" : false\n" - + " },\n" - + " \"split_shards_metadata\" : {\n" - + " \"num_of_root_shards\" : 1,\n" - + " \"max_shard_id\" : 0,\n" - + " \"active_shard_ids\" : [\n" - + " 0\n" - + " ],\n" - + " \"root_shards_to_all_children\" : { },\n" - + " \"parent_to_child_shards\" : { }\n" + " }\n" + " }\n" + " },\n" @@ -705,9 +681,6 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti + " \"primary_terms\" : {\n" + " \"0\" : 1\n" + " },\n" - + " \"primary_terms_map\" : {\n" - + " \"0\" : 1\n" - + " },\n" + " \"in_sync_allocations\" : {\n" + " \"0\" : [\n" + " \"allocationId\"\n" @@ -722,15 +695,6 @@ public void testToXContent_FlatSettingFalse_ReduceMappingTrue() throws IOExcepti + " \"system\" : false,\n" + " \"ingestion_status\" : {\n" + " \"is_paused\" : false\n" - + " },\n" - + " \"split_shards_metadata\" : {\n" - + " \"num_of_root_shards\" : 1,\n" - + " \"max_shard_id\" : 0,\n" - + " \"active_shard_ids\" : [\n" - + " 0\n" - + " ],\n" - + " \"root_shards_to_all_children\" : { },\n" - + " \"parent_to_child_shards\" : { }\n" + " }\n" + " }\n" + " },\n" @@ -876,9 +840,6 @@ public void testToXContentSameTypeName() throws IOException { + " \"primary_terms\" : {\n" + " \"0\" : 1\n" + " },\n" - + " \"primary_terms_map\" : {\n" - + " \"0\" : 1\n" - + " },\n" + " \"in_sync_allocations\" : {\n" + " \"0\" : [ ]\n" + " },\n" @@ -886,15 +847,6 @@ public void testToXContentSameTypeName() 
throws IOException { + " \"system\" : false,\n" + " \"ingestion_status\" : {\n" + " \"is_paused\" : false\n" - + " },\n" - + " \"split_shards_metadata\" : {\n" - + " \"num_of_root_shards\" : 1,\n" - + " \"max_shard_id\" : 0,\n" - + " \"active_shard_ids\" : [\n" - + " 0\n" - + " ],\n" - + " \"root_shards_to_all_children\" : { },\n" - + " \"parent_to_child_shards\" : { }\n" + " }\n" + " }\n" + " },\n" diff --git a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java index c12ed28bff88e..11575308a2f95 100644 --- a/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java +++ b/server/src/test/java/org/opensearch/cluster/metadata/IndexMetadataTests.java @@ -755,104 +755,4 @@ public void testAllActivePullBasedIngestionSettings() { isAllActiveIngestionEnabled = IndexMetadata.INGESTION_SOURCE_ALL_ACTIVE_INGESTION_SETTING.get(settings6); assertFalse(isAllActiveIngestionEnabled); } - - public void testPrimaryTermsMapXContentRoundTrip() throws IOException { - int numShards = randomFrom(2, 4, 8); - IndexMetadata.Builder builder = IndexMetadata.builder("test-primary-terms-map") - .settings( - Settings.builder() - .put("index.version.created", 1 ^ MASK) - .put("index.number_of_shards", numShards) - .put("index.number_of_replicas", 0) - .build() - ) - .creationDate(randomLong()) - .setRoutingNumShards(numShards * 2); - - // Set distinct primary terms per shard - for (int i = 0; i < numShards; i++) { - builder.primaryTerm(i, randomLongBetween(1, 100)); - } - IndexMetadata metadata = builder.build(); - - // XContent round-trip - final XContentBuilder xContentBuilder = JsonXContent.contentBuilder(); - xContentBuilder.startObject(); - IndexMetadata.FORMAT.toXContent(xContentBuilder, metadata); - xContentBuilder.endObject(); - XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(xContentBuilder)); - IndexMetadata fromXContent = IndexMetadata.fromXContent(parser); - - assertEquals(metadata, fromXContent); - for (int i = 0; i < numShards; i++) { - assertEquals(metadata.primaryTerm(i), fromXContent.primaryTerm(i)); - } - } - - public void testPrimaryTermsMapStreamRoundTrip() throws IOException { - int numShards = randomFrom(2, 4, 8); - IndexMetadata.Builder builder = IndexMetadata.builder("test-primary-terms-map-stream") - .settings( - Settings.builder() - .put("index.version.created", 1 ^ MASK) - .put("index.number_of_shards", numShards) - .put("index.number_of_replicas", 0) - .build() - ) - .creationDate(randomLong()) - .setRoutingNumShards(numShards * 2); - - for (int i = 0; i < numShards; i++) { - builder.primaryTerm(i, randomLongBetween(1, 100)); - } - IndexMetadata metadata = builder.build(); - - // Stream round-trip - final BytesStreamOutput out = new BytesStreamOutput(); - metadata.writeTo(out); - try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), writableRegistry())) { - IndexMetadata deserialized = IndexMetadata.readFrom(in); - assertEquals(metadata, deserialized); - for (int i = 0; i < numShards; i++) { - assertEquals(metadata.primaryTerm(i), deserialized.primaryTerm(i)); - } - } - } - - public void testPrimaryTermsMapDiffRoundTrip() throws IOException { - int numShards = 4; - IndexMetadata.Builder beforeBuilder = IndexMetadata.builder("test-diff") - .settings( - Settings.builder() - .put("index.version.created", 1 ^ MASK) - .put("index.number_of_shards", numShards) - .put("index.number_of_replicas", 0) - .build() - 
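The hunks below delete the map-specific round-trip tests from IndexMetadataTests outright. If array-backed coverage were still wanted, a hedged sketch of an equivalent stream round-trip test, reusing the deleted tests' own scaffolding (`MASK`, `writableRegistry()`, the same builder calls), could look like:

```java
// Hypothetical replacement test; assumes it lives inside IndexMetadataTests,
// so MASK, randomFrom, writableRegistry() etc. are in scope.
public void testPrimaryTermsStreamRoundTrip() throws IOException {
    int numShards = randomFrom(2, 4, 8);
    IndexMetadata.Builder builder = IndexMetadata.builder("test-primary-terms")
        .settings(
            Settings.builder()
                .put("index.version.created", 1 ^ MASK)
                .put("index.number_of_shards", numShards)
                .put("index.number_of_replicas", 0)
                .build()
        )
        .creationDate(randomLong())
        .setRoutingNumShards(numShards * 2);
    for (int i = 0; i < numShards; i++) {
        builder.primaryTerm(i, randomLongBetween(1, 100));
    }
    IndexMetadata metadata = builder.build();

    BytesStreamOutput out = new BytesStreamOutput();
    metadata.writeTo(out);
    try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), writableRegistry())) {
        IndexMetadata deserialized = IndexMetadata.readFrom(in);
        assertEquals(metadata, deserialized); // Arrays.equals now backs this comparison
    }
}
```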
) - .creationDate(randomLong()) - .setRoutingNumShards(numShards * 2); - for (int i = 0; i < numShards; i++) { - beforeBuilder.primaryTerm(i, 1); - } - IndexMetadata before = beforeBuilder.build(); - - // Bump primary term on shard 2 - IndexMetadata.Builder afterBuilder = IndexMetadata.builder(before); - afterBuilder.primaryTerm(2, 5); - afterBuilder.version(before.getVersion() + 1); - IndexMetadata after = afterBuilder.build(); - - Diff diff = new IndexMetadata.IndexMetadataDiff(before, after); - - // Serialize and deserialize the diff - final BytesStreamOutput out = new BytesStreamOutput(); - diff.writeTo(out); - try (StreamInput in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), writableRegistry())) { - Diff deserializedDiff = IndexMetadata.readDiffFrom(in); - IndexMetadata applied = deserializedDiff.apply(before); - assertEquals(after, applied); - assertEquals(5, applied.primaryTerm(2)); - assertEquals(1, applied.primaryTerm(0)); - } - } } diff --git a/server/src/test/java/org/opensearch/cluster/metadata/ShardRangeTests.java b/server/src/test/java/org/opensearch/cluster/metadata/ShardRangeTests.java deleted file mode 100644 index a6f1d8d36e620..0000000000000 --- a/server/src/test/java/org/opensearch/cluster/metadata/ShardRangeTests.java +++ /dev/null @@ -1,387 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * compatible open source license. - */ - -package org.opensearch.cluster.metadata; - -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.xcontent.LoggingDeprecationHandler; -import org.opensearch.common.xcontent.XContentFactory; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.common.io.stream.StreamOutput; -import org.opensearch.core.xcontent.NamedXContentRegistry; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; -import java.io.OutputStream; - -public class ShardRangeTests extends OpenSearchTestCase { - - /** - * Test compareTo method with equal start values - */ - - public void testCompareToWithEqualStartValues() { - ShardRange range1 = new ShardRange(1, 0, 100); - ShardRange range2 = new ShardRange(2, 0, 200); - - assertEquals(0, range1.compareTo(range2)); - } - - /** - * Test compareTo method with overlapping ranges - */ - - public void testCompareToWithOverlappingRanges() { - ShardRange range1 = new ShardRange(1, 0, 100); - ShardRange range2 = new ShardRange(2, 50, 150); - - assertTrue(range1.compareTo(range2) < 0); - assertTrue(range2.compareTo(range1) > 0); - } - - /** - * Test compareTo method with null input - */ - - public void testCompareToWithNull() { - ShardRange range = new ShardRange(1, 0, 100); - expectThrows(NullPointerException.class, () -> range.compareTo(null)); - } - - public void testContainsWithHashInBetween() { - /** - * Test that the contains method returns true when the hash falls strictly between the start and end values. - */ - ShardRange shardRange = new ShardRange(0, 10, 20); - assertTrue(shardRange.contains(15)); - } - - public void testContainsWithHashEqualToEnd() { - /** - * Test that the contains method returns true when the hash is equal to the end value.
- */ - ShardRange shardRange = new ShardRange(0, 10, 20); - assertTrue(shardRange.contains(20)); - } - - public void testContainsWithHashEqualToStart() { - /** - * Test that the contains method returns true when the hash is equal to the start value. - */ - ShardRange shardRange = new ShardRange(0, 10, 20); - assertTrue(shardRange.contains(10)); - } - - public void testContainsWithHashGreaterThanEnd() { - /** - * Test that the contains method returns false when the hash is greater than the end value. - */ - ShardRange shardRange = new ShardRange(0, 10, 20); - assertFalse(shardRange.contains(25)); - } - - public void testContainsWithHashLessThanStart() { - /** - * Test that the contains method returns false when the hash is less than the start value. - */ - ShardRange shardRange = new ShardRange(0, 10, 20); - assertFalse(shardRange.contains(5)); - } - - public void testContainsWithNegativeHash() { - /** - * Test that the contains method handles negative hash values correctly. - */ - ShardRange shardRange = new ShardRange(0, -10, 10); - assertTrue(shardRange.contains(-5)); - assertFalse(shardRange.contains(-15)); - } - - /** - * Tests the equals method of ShardRange class for various scenarios. - */ - - public void testEqualsMethod() { - // Create ShardRange objects for testing - ShardRange range1 = new ShardRange(1, 0, 100); - ShardRange range2 = new ShardRange(1, 0, 100); - ShardRange range3 = new ShardRange(2, 0, 100); - ShardRange range4 = new ShardRange(1, 10, 100); - ShardRange range5 = new ShardRange(1, 0, 90); - - // Test equality with itself - assertTrue("A ShardRange should be equal to itself", range1.equals(range1)); - - // Test equality with an identical ShardRange - assertTrue("Two identical ShardRanges should be equal", range1.equals(range2)); - - // Test inequality with different shardId - assertFalse("ShardRanges with different shardIds should not be equal", range1.equals(range3)); - - // Test inequality with different start value - assertFalse("ShardRanges with different start values should not be equal", range1.equals(range4)); - - // Test inequality with different end value - assertFalse("ShardRanges with different end values should not be equal", range1.equals(range5)); - - // Test inequality with null - assertFalse("A ShardRange should not be equal to null", range1.equals(null)); - } - - /** - * Test case for getEnd() method of ShardRange class - * Verifies that the method correctly returns the end value - */ - - public void testGetEndReturnsCorrectValue() { - // Arrange - int expectedEnd = 100; - ShardRange shardRange = new ShardRange(0, 0, expectedEnd); - - // Act - int actualEnd = shardRange.end(); - - // Assert - assertEquals("getEnd() should return the correct end value", expectedEnd, actualEnd); - } - - /** - * Test case for getShardId() method - * Verifies that the method correctly returns the shard ID - */ - - public void testGetShardIdReturnsCorrectValue() { - // Arrange - int expectedShardId = 5; - ShardRange shardRange = new ShardRange(expectedShardId, 0, 100); - - // Act - int actualShardId = shardRange.shardId(); - - // Assert - assertEquals("getShardId() should return the correct shard ID", expectedShardId, actualShardId); - } - - /** - * Test that getStart returns the correct value for a valid ShardRange - */ - - public void testGetStartReturnsCorrectValue() { - ShardRange shardRange = new ShardRange(1, 100, 200); - assertEquals(100, shardRange.start()); - } - - /** - * Test hashCode() consistency for equal objects - */ - - public void 
testHashCodeConsistency() { - ShardRange shardRange1 = new ShardRange(1, 100, 200); - ShardRange shardRange2 = new ShardRange(1, 100, 200); - assertEquals(shardRange1.hashCode(), shardRange2.hashCode()); - } - - /** - * Test hashCode() with zero values for all fields - */ - - public void testHashCodeWithZeroValues() { - ShardRange shardRange = new ShardRange(0, 0, 0); - assertEquals(0, shardRange.hashCode()); - } - - /** - * Test the toString method of ShardRange - * Verifies that the toString method returns the expected string representation - */ - - public void testToStringReturnsCorrectRepresentation() { - // Arrange - int shardId = 1; - int start = 0; - int end = 100; - ShardRange shardRange = new ShardRange(shardId, start, end); - - // Act - String result = shardRange.toString(); - - // Assert - String expected = "ShardRange{shardId=1, start=0, end=100}"; - assertEquals("The toString method should return the correct string representation", expected, result); - } - - /** - * Test that toXContent correctly serializes a ShardRange object to JSON - */ - - public void testToXContentSerializesShardRangeCorrectly() throws IOException { - int shardId = 1; - int start = 0; - int end = 100; - ShardRange shardRange = new ShardRange(shardId, start, end); - - XContentBuilder builder = XContentFactory.jsonBuilder(); - builder = shardRange.toXContent(builder, null); - - String expected = "{\"shard_id\":1,\"start\":0,\"end\":100}"; - assertEquals(expected, builder.toString()); - } - - public void testToXContentWithNullBuilder() { - /** - * Test toXContent method with null XContentBuilder - * This test verifies that a NullPointerException is thrown when a null XContentBuilder is provided - */ - ShardRange shardRange = new ShardRange(1, 0, 100); - expectThrows(NullPointerException.class, () -> shardRange.toXContent(null, null)); - } - - public void testToXContentIOException() throws IOException { - ShardRange shardRange = new ShardRange(1, 0, 100); - - // Create a real XContentBuilder that writes to a broken OutputStream - OutputStream failingStream = new OutputStream() { - @Override - public void write(int b) throws IOException { - throw new IOException("Simulated IOException"); - } - - @Override - public void write(byte[] b) throws IOException { - throw new IOException("Simulated IOException"); - } - - @Override - public void write(byte[] b, int off, int len) throws IOException { - throw new IOException("Simulated IOException"); - } - }; - - XContentBuilder builder = XContentFactory.jsonBuilder(failingStream); - builder.startObject(); // Start the JSON object - - // The actual test - this should throw IOException when trying to write - expectThrows(IOException.class, () -> { - shardRange.toXContent(builder, null); - builder.endObject(); // End the JSON object - builder.close(); // Force flush/close which will trigger the write - }); - } - - public void testWriteToWithIOException() { - /** - * Test that writeTo properly propagates IOException - */ - ShardRange shardRange = new ShardRange(1, 0, 100); - StreamOutput failingOutput = new StreamOutput() { - @Override - public void writeByte(byte b) throws IOException { - throw new IOException("Simulated IO failure"); - } - - @Override - public void writeBytes(byte[] b, int offset, int length) throws IOException { - throw new IOException("Simulated IO failure"); - } - - @Override - public void flush() throws IOException { - throw new IOException("Simulated IO failure"); - } - -
@Override - public void close() throws IOException { - throw new IOException("Simulated IO failure"); - } - - @Override - public void reset() throws IOException { - throw new IOException("Simulated IO failure"); - } - }; - - expectThrows(IOException.class, () -> shardRange.writeTo(failingOutput)); - } - - public void testWriteToWithNullStreamOutput() { - /** - * Test that writeTo throws NullPointerException when StreamOutput is null - */ - ShardRange shardRange = new ShardRange(1, 0, 100); - expectThrows(NullPointerException.class, () -> shardRange.writeTo(null)); - } - - /** - * Test that the writeTo method correctly writes the ShardRange data to the stream - * and that it can be read back correctly. - */ - - public void testWriteToCorrectlySerializesAndDeserializes() throws IOException { - int shardId = 1; - int start = 100; - int end = 200; - ShardRange originalShardRange = new ShardRange(shardId, start, end); - - BytesStreamOutput out = new BytesStreamOutput(); - originalShardRange.writeTo(out); - - StreamInput in = out.bytes().streamInput(); - ShardRange deserializedShardRange = new ShardRange(in); - - assertEquals(originalShardRange.shardId(), deserializedShardRange.shardId()); - assertEquals(originalShardRange.start(), deserializedShardRange.start()); - assertEquals(originalShardRange.end(), deserializedShardRange.end()); - } - - private XContentParser createParser(String content) throws IOException { - return JsonXContent.jsonXContent.createParser(NamedXContentRegistry.EMPTY, LoggingDeprecationHandler.INSTANCE, content); - } - - public void testParseValidShardRange() throws IOException { - String json = "{" + "\"shard_id\": 1," + "\"start\": 0," + "\"end\": 100" + "}"; - - XContentParser parser = createParser(json); - parser.nextToken(); // Move to start object - - ShardRange shardRange = ShardRange.parse(parser); - - assertEquals("Incorrect shard ID", 1, shardRange.shardId()); - assertEquals("Incorrect start value", 0, shardRange.start()); - assertEquals("Incorrect end value", 100, shardRange.end()); - } - - public void testParseEmptyObject() throws IOException { - String json = "{}"; - - XContentParser parser = createParser(json); - parser.nextToken(); - - ShardRange shardRange = ShardRange.parse(parser); - - assertEquals("Empty object should have default shard ID", -1, shardRange.shardId()); - assertEquals("Empty object should have default start", -1, shardRange.start()); - assertEquals("Empty object should have default end", -1, shardRange.end()); - } - - public void testParseWithExtraFields() throws IOException { - String json = "{" + "\"shard_id\": 1," + "\"start\": 0," + "\"end\": 100," + "\"extra_field\": \"value\"" + "}"; - - XContentParser parser = createParser(json); - parser.nextToken(); - - ShardRange shardRange = ShardRange.parse(parser); - - assertEquals("Incorrect shard ID", 1, shardRange.shardId()); - assertEquals("Incorrect start value", 0, shardRange.start()); - assertEquals("Incorrect end value", 100, shardRange.end()); - } - -} diff --git a/server/src/test/java/org/opensearch/cluster/metadata/SplitShardsMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/SplitShardsMetadataTests.java deleted file mode 100644 index 7526d9c40c736..0000000000000 --- a/server/src/test/java/org/opensearch/cluster/metadata/SplitShardsMetadataTests.java +++ /dev/null @@ -1,1263 +0,0 @@ -/* - * SPDX-License-Identifier: Apache-2.0 - * - * The OpenSearch Contributors require contributions made to - * this file be licensed under the Apache-2.0 license or a - * 
compatible open source license. - */ - -package org.opensearch.cluster.metadata; - -import org.opensearch.cluster.Diff; -import org.opensearch.common.io.stream.BytesStreamOutput; -import org.opensearch.common.xcontent.json.JsonXContent; -import org.opensearch.core.common.bytes.BytesReference; -import org.opensearch.core.common.io.stream.StreamInput; -import org.opensearch.core.xcontent.ToXContent; -import org.opensearch.core.xcontent.XContentBuilder; -import org.opensearch.core.xcontent.XContentParser; -import org.opensearch.test.OpenSearchTestCase; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; - -import org.mockito.Mockito; - -import static org.opensearch.cluster.metadata.SplitShardsMetadata.validateShardRanges; -import static org.mockito.Mockito.when; - -public class SplitShardsMetadataTests extends OpenSearchTestCase { - - public void testGetShardIdOfHashWithNoChildren() { - // Setup - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(5); - - // Test: When there are no children, should return the root shard id - int result = builder.build().getShardIdOfHash(0, 100, false); - assertEquals(0, result); - } - - public void testGetRootShards_splitInProgress() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); - builder.splitShard(0, 3); - - SplitShardsMetadata metadata = builder.build(); - List rootShards = metadata.getRootShards(); - List expectedRootShards = List.of(0, 1, 2); - assertEquals(expectedRootShards, rootShards); - } - - public void testGetRootShards_splitCompleted() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); - - // split - builder.splitShard(0, 3); - - // split completed for shard 0 - builder.updateSplitMetadataForChildShards(0, Set.of(3, 4, 5)); - - SplitShardsMetadata metadata = builder.build(); - List rootShards = metadata.getRootShards(); - List expectedRootShards = List.of(0, 1, 2); - assertEquals(expectedRootShards, rootShards); - - Set expectedActiveShards = Set.of(1, 2, 3, 4, 5); - Set actualShardIds = new HashSet<>(); - for (Iterator iterator = metadata.getActiveShardIterator(); iterator.hasNext();) { - actualShardIds.add(iterator.next()); - } - assertEquals(expectedActiveShards, actualShardIds); - } - - /** - * Test for no split data - */ - public void testGetActiveShardIterator_emptyIterator() { - // Arrange - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(0); // maxShardId = -1 - SplitShardsMetadata metadata = builder.build(); - - // Act & Assert - Iterator iterator = metadata.getActiveShardIterator(); - assertFalse(iterator.hasNext()); - } - - /** - * Tests get active shard iterator for split in progress - */ - public void test_getActiveShardIterator_splitInProgress() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); - builder.splitShard(0, 3); - - Set expectedActiveShards = Set.of(0, 1, 2); - Set actualActiveShards = new HashSet<>(); - SplitShardsMetadata metadata = builder.build(); - for (Iterator it = metadata.getActiveShardIterator(); it.hasNext();) { - actualActiveShards.add(it.next()); - } - - // Split in progress, should return all 3 shards - assertEquals(actualActiveShards, expectedActiveShards); - - } - - /** - * Tests get active shard iterator for split completed - */ - public void test_getActiveShardIterator_splitCompleted() { - 
SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); // Start with 3 root shards - // First split - create existing child shards - builder.splitShard(0, 3); - - // split completed for shard 0 - builder.updateSplitMetadataForChildShards(0, Set.of(3, 4, 5)); - - Set expectedActiveShardsForConsecutiveShardSplit = Set.of(1, 2, 3, 4, 5); - Set actualActiveShardsForConsecutiveShardSplit = new HashSet<>(); - SplitShardsMetadata splitCompletedShardSplitMetadata = builder.build(); - for (Iterator it = splitCompletedShardSplitMetadata.getActiveShardIterator(); it.hasNext();) { - actualActiveShardsForConsecutiveShardSplit.add(it.next()); - } - // Split completed, should return the remaining root shards plus the children of shard 0 (5 shards) - assertEquals(expectedActiveShardsForConsecutiveShardSplit, actualActiveShardsForConsecutiveShardSplit); - } - - /** - * Tests get active shard iterator for consecutive split - */ - public void test_getActiveShardIterator_consecutiveSplits() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); // Start with 3 root shards - // First split - create existing child shards - builder.splitShard(0, 3); - - Set expectedActiveShards = Set.of(0, 1, 2); - Set actualActiveShards = new HashSet<>(); - SplitShardsMetadata metadata = builder.build(); - for (Iterator it = metadata.getActiveShardIterator(); it.hasNext();) { - actualActiveShards.add(it.next()); - } - - // Split in progress, should return all 3 shards - assertEquals(actualActiveShards, expectedActiveShards); - - // split completed for shard 0 - builder.updateSplitMetadataForChildShards(0, Set.of(3, 4, 5)); - - // split in progress for shard 1 - builder.splitShard(1, 2); - - // split completed for shard 1 - builder.updateSplitMetadataForChildShards(1, Set.of(6, 7)); - - Set expectedActiveShardsForConsecutiveShardSplit = Set.of(2, 3, 4, 5, 6, 7); - Set actualActiveShardsForConsecutiveShardSplit = new HashSet<>(); - SplitShardsMetadata consecutiveShardSplitMetadata = builder.build(); - for (Iterator it = consecutiveShardSplitMetadata.getActiveShardIterator(); it.hasNext();) { - actualActiveShardsForConsecutiveShardSplit.add(it.next()); - } - - // Should return only active child shards - assertEquals(actualActiveShardsForConsecutiveShardSplit, expectedActiveShardsForConsecutiveShardSplit); - } - - /** - * Tests getShardIdOfHash when there are existing child shards and in-progress children.
- */ - public void testGetShardIdOfHashWithExistingAndInProgressChildren() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); // Start with 1 root shard - // First split - create existing child shards - builder.splitShard(0, 3); - builder.updateSplitMetadataForChildShards(0, Set.of(1, 2, 3)); - - // Second split - split the middle shard (ID 2) - builder.splitShard(2, 2); - SplitShardsMetadata metadata = builder.build(); - - // Execute - test hash that falls in the range of first child of shard 2 - int result = metadata.getShardIdOfHash(0, 500, true); - - // Assert - should route to the first child of the in-progress split - assertEquals("Hash should route to first child of in-progress split", 5, result); - } - - /** - * Test getShardIdOfHash when root shard has no children but in-progress split exists - */ - public void testGetShardIdOfHashWithInProgressSplit() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); // Start with 1 root shard - // Setup - split root shard - builder.splitShard(0, 2); - SplitShardsMetadata metadata = builder.build(); - - // Execute - test with hash that should go to second child - int result = metadata.getShardIdOfHash(0, 100, true); - - // Verify - should route to the second child shard - assertEquals("Should route to second child shard", 2, result); - } - - /** - * Test case for in-progress split being ignored - */ - public void testGetShardIdOfHashWithInProgressSplitIgnored() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(5); - builder.splitShard(0, 2); - SplitShardsMetadata metadata = builder.build(); - - // Should return root shard ID when includeInProgressChildren is false - assertEquals(0, metadata.getShardIdOfHash(0, 100, false)); - } - - /** - * Test case for getShardIdOfHash when the root shard has no children and no in-progress split - */ - public void test_getShardIdOfHash_returnsRootShardId() { - // Arrange - int rootShardId = 0; - int hash = 123; - boolean includeInProgressChildren = false; - - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); // 1 root shard - SplitShardsMetadata metadata = builder.build(); - - // Act - int result = metadata.getShardIdOfHash(rootShardId, hash, includeInProgressChildren); - - // Assert - assertEquals("Should return the root shard ID when there are no children", rootShardId, result); - } - - /** - * Test getShardIdOfHash when root shard has children but no in-progress splits - */ - public void test_getShardIdOfHash_withExistingChildren() { - // Setup - int rootShardId = 0; - int hash = 500; - boolean includeInProgressChildren = true; - - // Setup - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); // Start with 1 root shard - - // Split the root shard into 3 pieces - builder.splitShard(0, 3); - - // Complete the split with three child shards - Set childShardIds = Set.of(1, 2, 3); - builder.updateSplitMetadataForChildShards(0, childShardIds); - - SplitShardsMetadata metadata = builder.build(); - - // Execute - int result = metadata.getShardIdOfHash(rootShardId, hash, includeInProgressChildren); - - // Verify - assertEquals(2, result); - } - - /** - * Test that getNumberOfRootShards returns the correct number of root shards - */ - public void testGetNumberOfRootShardsReturnsCorrectCount() { - // Arrange - int expectedRootShards = 5; - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(expectedRootShards); - SplitShardsMetadata metadata = builder.build(); - - // Act - int 
actualRootShards = metadata.getNumberOfRootShards(); - - // Assert - assertEquals("Number of root shards should match the initial count", expectedRootShards, actualRootShards); - } - - /** - * Test case for getting number of root shards after splitting - * Expected behavior: Should return the same number of root shards - */ - public void testGetNumberOfRootShardsAfterSplitting() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(5); - builder.splitShard(0, 2); - SplitShardsMetadata metadata = builder.build(); - assertEquals("Number of root shards should remain unchanged after splitting", 5, metadata.getNumberOfRootShards()); - } - - /** - * Test case for empty metadata - * Expected behavior: Should return 0 for empty metadata - */ - public void testGetNumberOfRootShardsWithEmptyMetadata() { - SplitShardsMetadata emptyMetadata = new SplitShardsMetadata.Builder(0).build(); - assertEquals("Empty metadata should have 0 root shards", 0, emptyMetadata.getNumberOfRootShards()); - } - - /** - * This test verifies that the getNumberOfShards() method - * correctly returns maxShardId + 1 - */ - public void testGetNumberOfShardsReturnsMaxShardIdPlusOne() { - // Arrange - int maxShardId = 5; - SplitShardsMetadata metadata = new SplitShardsMetadata.Builder(maxShardId + 1).build(); - - // Act - int result = metadata.getNumberOfShards(); - - // Assert - assertEquals("getNumberOfShards should return maxShardId + 1", maxShardId + 1, result); - } - - /** - * Test case for getting number of shards after splitting - */ - public void testGetNumberOfShardsAfterSplitting() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); - // Split the root shard into 3 pieces - builder.splitShard(0, 3); - - // Complete the split with three child shards - Set childShardIds = Set.of(1, 2, 3); - builder.updateSplitMetadataForChildShards(0, childShardIds); - SplitShardsMetadata metadata = builder.build(); - assertEquals(3, metadata.getNumberOfShards()); - } - - /** - * Test case for empty metadata - * Expected behavior: Should return 0 for empty metadata - */ - public void testGetNumberOfShardsWithEmptyMetadata() { - SplitShardsMetadata emptyMetadata = new SplitShardsMetadata.Builder(0).build(); - assertEquals("Empty metadata should have 0 shards", 0, emptyMetadata.getNumberOfShards()); - } - - /** - * Test case for getChildShardsOfParent when the parent shard has child shards - */ - public void testGetChildShardsOfParentWithExistingChildren() { - // Create a builder and add some child shards for a parent - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(5); - int parentShardId = 2; - int numberOfChildren = 3; - builder.splitShard(parentShardId, numberOfChildren); - - // Build the metadata - SplitShardsMetadata metadata = builder.build(); - - // Get child shards of the parent - ShardRange[] childShards = metadata.getChildShardsOfParent(parentShardId); - - // Assert that child shards are returned and match the expected number - assertNotNull(childShards); - assertEquals(numberOfChildren, childShards.length); - - // Verify that returned child shards match across calls - ShardRange[] originalChildShards = metadata.getChildShardsOfParent(parentShardId); - for (int i = 0; i < childShards.length; i++) { - assertEquals(originalChildShards[i], childShards[i]); - } - } - - /** - * Test case for getChildShardsOfParent after a cancelled split operation - */ - public void testGetChildShardsOfParentAfterCancelledSplit() { - SplitShardsMetadata.Builder builder = new 
SplitShardsMetadata.Builder(5); - builder.splitShard(0, 2); - builder.cancelSplit(0); - SplitShardsMetadata metadata = builder.build(); - - assertNull("Should return null for shard with cancelled split", metadata.getChildShardsOfParent(0)); - } - - /** - * Test case for getChildShardsOfParent with an unsplit shard - */ - public void testGetChildShardsOfParentWithUnsplitShard() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(5); - SplitShardsMetadata metadata = builder.build(); - - assertNull("Should return null for unsplit shard", metadata.getChildShardsOfParent(0)); - } - - /** - * Test case for SplitShardsMetadata.Builder constructor with numberOfShards parameter - */ - public void testBuilderConstructorWithNumberOfShards() { - int numberOfShards = 5; - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(numberOfShards); - - SplitShardsMetadata metadata = builder.build(); - - assertEquals(numberOfShards, metadata.getNumberOfRootShards()); - assertEquals(numberOfShards, metadata.getNumberOfShards()); - assertTrue(metadata.getInProgressSplitShardIds().isEmpty()); - - for (int i = 0; i < numberOfShards; i++) { - assertNull(metadata.getChildShardsOfParent(i)); - } - } - - /** - * Tests the Builder constructor with a SplitShardsMetadata instance where some root shards have no children. - */ - public void testBuilderWithEmptyRootShards() { - // Create a SplitShardsMetadata with some empty root shards - SplitShardsMetadata.Builder originalBuilder = new SplitShardsMetadata.Builder(3); - SplitShardsMetadata original = originalBuilder.build(); - - // Create a new Builder using the original SplitShardsMetadata - SplitShardsMetadata.Builder newBuilder = new SplitShardsMetadata.Builder(original); - - // Build the new SplitShardsMetadata - SplitShardsMetadata result = newBuilder.build(); - - // Assert that the new SplitShardsMetadata matches the original - assertEquals(original, result); - assertEquals(3, result.getNumberOfRootShards()); - assertNull(result.getChildShardsOfParent(0)); - assertNull(result.getChildShardsOfParent(1)); - assertNull(result.getChildShardsOfParent(2)); - assertTrue(result.getInProgressSplitShardIds().isEmpty()); - } - - /** - * Test that cancelSplit correctly removes the in-progress split and resets the inProgressSplitShardId - */ - public void testCancelSplitRemovesInProgressSplit() { - // Setup - int numberOfShards = 5; - int sourceShardId = 2; - int numberOfChildren = 2; - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(numberOfShards); - - // Start a split - builder.splitShard(sourceShardId, numberOfChildren); - - // Verify split is in progress - assertTrue(builder.build().getInProgressSplitShardIds().contains(sourceShardId)); - - // Cancel the split - builder.cancelSplit(sourceShardId); - - // Build the metadata - SplitShardsMetadata metadata = builder.build(); - - // Verify the split was canceled - assertTrue(metadata.getInProgressSplitShardIds().isEmpty()); - assertNull(metadata.getChildShardsOfParent(sourceShardId)); - } - - /** - * Test that getInProgressSplitShardId returns the correct shard ID - */ - public void testGetInProgressSplitShardId() { - int expectedShardId = 5; - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(10); - builder.splitShard(expectedShardId, 2); - SplitShardsMetadata metadata = builder.build(); - - assertTrue( - "The in-progress split shard ID should match the expected value", - metadata.getInProgressSplitShardIds().contains(expectedShardId) - ); - } - - 
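For orientation, the full split lifecycle these tests walk through condenses to a few lines; a minimal sketch using only the Builder calls exercised in this test class (the child IDs 3 and 4 match what the tests above assign when splitting shard 0 of a three-shard index):

    SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); // three root shards: 0, 1, 2
    builder.splitShard(0, 2); // split of shard 0 is now in progress
    assertTrue(builder.build().getInProgressSplitShardIds().contains(0));
    builder.updateSplitMetadataForChildShards(0, Set.of(3, 4)); // completion: children 3 and 4 replace parent 0
    assertTrue(builder.build().getInProgressSplitShardIds().isEmpty()); // calling cancelSplit(0) instead would keep shard 0 active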
public void testGetInProgressSplitShardIdAfterCancelingSplit() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); - builder.splitShard(0, 2); - builder.cancelSplit(0); - SplitShardsMetadata metadata = builder.build(); - assertTrue("After canceling a split, shard split shouldn't be in progress", metadata.getInProgressSplitShardIds().isEmpty()); - } - - public void testGetInProgressSplitShardIdAfterCompletingSplit() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); - builder.splitShard(0, 2); - builder.updateSplitMetadataForChildShards(0, Set.of(1, 2)); - SplitShardsMetadata metadata = builder.build(); - assertTrue("After completing a split, shard split shouldn't be in progress", metadata.getInProgressSplitShardIds().isEmpty()); - } - - public void testGetInProgressSplitShardIdWhenNoSplitInProgress() { - SplitShardsMetadata metadata = new SplitShardsMetadata.Builder(1).build(); - assertTrue("No split should be in progress", metadata.getInProgressSplitShardIds().isEmpty()); - } - - public void testGetInProgressSplitShardIdWithValidSplitInProgress() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); - builder.splitShard(0, 2); - SplitShardsMetadata metadata = builder.build(); - assertTrue( - "When a valid split is in progress, the correct shard ID should be returned", - metadata.getInProgressSplitShardIds().contains(0) - ); - } - - /** - * Test that the method returns true when the correct shard is being split. - */ - public void testIsSplitOfShardInProgress_CorrectShardInProgress() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(5); - builder.splitShard(2, 2); - SplitShardsMetadata metadata = builder.build(); - assertTrue(metadata.isSplitOfShardInProgress(2)); - } - - /** - * Test that the method returns false when a different shard is being split. - */ - public void testIsSplitOfShardInProgress_DifferentShardInProgress() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(5); - builder.splitShard(2, 2); - SplitShardsMetadata metadata = builder.build(); - assertFalse(metadata.isSplitOfShardInProgress(1)); - } - - /** - * Test that the method returns false when no split is in progress. 
- */ - public void testIsSplitOfShardInProgress_NoSplitInProgress() { - SplitShardsMetadata metadata = new SplitShardsMetadata.Builder(5).build(); - assertFalse(metadata.isSplitOfShardInProgress(0)); - assertTrue(metadata.getInProgressSplitShardIds().isEmpty()); - } - - public void testSplitShardWhenSplitAlreadyInProgress() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); - builder.splitShard(0, 2); - SplitShardsMetadata metadata = builder.build(); - builder = new SplitShardsMetadata.Builder(metadata); - builder.splitShard(1, 3); - metadata = builder.build(); - builder = new SplitShardsMetadata.Builder(metadata); - builder.splitShard(2, 5); - metadata = builder.build(); - builder = new SplitShardsMetadata.Builder(metadata); - builder.updateSplitMetadataForChildShards(0, Set.of(3, 4)); - builder.updateSplitMetadataForChildShards(1, Set.of(5, 6, 7)); - builder.updateSplitMetadataForChildShards(2, Set.of(8, 9, 10, 11, 12)); - metadata = builder.build(); - Set finalShards = new HashSet<>(Arrays.asList(3, 4, 5, 6, 7, 8, 9, 10, 11, 12)); - Iterator shardIterator = metadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - assertTrue(finalShards.remove(shardIterator.next())); - } - assertTrue(finalShards.isEmpty()); - } - - public void testSplitShardWhenAnotherSplitIsCancelled_1() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); - builder.splitShard(0, 2); - SplitShardsMetadata metadata = builder.build(); - builder = new SplitShardsMetadata.Builder(metadata); - builder.splitShard(1, 4); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.splitShard(2, 5); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.updateSplitMetadataForChildShards(0, Set.of(3, 4)); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.updateSplitMetadataForChildShards(2, Set.of(9, 10, 11, 12, 13)); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.cancelSplit(1); - metadata = builder.build(); - Set finalShards = new HashSet<>(Arrays.asList(1, 3, 4, 9, 10, 11, 12, 13)); - Iterator shardIterator = metadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - int nextShard = shardIterator.next(); - assertTrue(finalShards.remove(nextShard)); - } - assertTrue(finalShards.isEmpty()); - - // Test reusing holes in new split - builder = new SplitShardsMetadata.Builder(metadata); - builder.splitShard(4, 2); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.updateSplitMetadataForChildShards(4, Set.of(5, 6)); - finalShards = new HashSet<>(Arrays.asList(1, 3, 5, 6, 9, 10, 11, 12, 13)); - metadata = builder.build(); - shardIterator = metadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - int nextShard = shardIterator.next(); - assertTrue(finalShards.remove(nextShard)); - } - assertTrue(finalShards.isEmpty()); - - builder = new SplitShardsMetadata.Builder(metadata); - builder.splitShard(12, 3); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.updateSplitMetadataForChildShards(12, Set.of(7, 8, 14)); - finalShards = new HashSet<>(Arrays.asList(1, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14)); - metadata = builder.build(); - shardIterator = metadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - int nextShard = shardIterator.next(); - assertTrue(finalShards.remove(nextShard)); - } - assertTrue(finalShards.isEmpty()); - } - - public void testSplitShardWhenAnotherSplitIsCancelled_2() { - 
SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); - builder.splitShard(0, 2); - SplitShardsMetadata metadata = builder.build(); - builder = new SplitShardsMetadata.Builder(metadata); - builder.splitShard(1, 3); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.splitShard(2, 5); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.updateSplitMetadataForChildShards(0, Set.of(3, 4)); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.updateSplitMetadataForChildShards(1, Set.of(5, 6, 7)); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.cancelSplit(2); - metadata = builder.build(); - Set finalShards = new HashSet<>(Arrays.asList(2, 3, 4, 5, 6, 7)); - Iterator shardIterator = metadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - int nextShard = shardIterator.next(); - assertTrue(finalShards.remove(nextShard)); - } - assertTrue(finalShards.isEmpty()); - } - - public void testSplitCancelWhenAnotherSplitIsInProgress() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); - builder.splitShard(0, 2); - SplitShardsMetadata metadata = builder.build(); - builder = new SplitShardsMetadata.Builder(metadata); - builder.splitShard(1, 4); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.splitShard(2, 5); - builder.cancelSplit(1); - metadata = builder.build(); - Set finalShards = new HashSet<>(Arrays.asList(0, 1, 2)); - Iterator shardIterator = metadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - int nextShard = shardIterator.next(); - assertTrue(finalShards.remove(nextShard)); - } - assertTrue(finalShards.isEmpty()); - - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.updateSplitMetadataForChildShards(0, Set.of(3, 4)); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.updateSplitMetadataForChildShards(2, Set.of(9, 10, 11, 12, 13)); - builder = new SplitShardsMetadata.Builder(builder.build()); - metadata = builder.build(); - finalShards = new HashSet<>(Arrays.asList(1, 3, 4, 9, 10, 11, 12, 13)); - shardIterator = metadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - int nextShard = shardIterator.next(); - assertTrue(finalShards.remove(nextShard)); - } - assertTrue(finalShards.isEmpty()); - - // Test reusing holes in new split - builder = new SplitShardsMetadata.Builder(metadata); - builder.splitShard(4, 2); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.updateSplitMetadataForChildShards(4, Set.of(5, 6)); - finalShards = new HashSet<>(Arrays.asList(1, 3, 5, 6, 9, 10, 11, 12, 13)); - metadata = builder.build(); - shardIterator = metadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - int nextShard = shardIterator.next(); - assertTrue(finalShards.remove(nextShard)); - } - assertTrue(finalShards.isEmpty()); - - builder = new SplitShardsMetadata.Builder(metadata); - builder.splitShard(12, 3); - builder = new SplitShardsMetadata.Builder(builder.build()); - builder.updateSplitMetadataForChildShards(12, Set.of(7, 8, 14)); - finalShards = new HashSet<>(Arrays.asList(1, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14)); - metadata = builder.build(); - shardIterator = metadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - int nextShard = shardIterator.next(); - assertTrue(finalShards.remove(nextShard)); - } - assertTrue(finalShards.isEmpty()); - - // Case 1 - // 1. 0,1,2 original - // 2. 0 -> 3,4 - // 3. 
1 -> 5, 6, 7 - // 4. 0=>X - // 5. 0 -> 3,4,8 - // 6. 0 completes 1,2,3,4,8 holes is 0, max shard id 8 - // 7. 1=>X, 1,2,3,4,8 holes is 5,6,7 m= - - } - - public void testChildShardIdsGeneration() { - int maxInitialShards = 20; - int initialShards = randomIntBetween(1, maxInitialShards); - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(initialShards); - SplitShardsMetadata metadata = builder.build(); - int splitExecCount = randomIntBetween(1, 20); - for (int i = 0; i < splitExecCount; i++) { - Set cancellableShards = new HashSet<>(); - Map> splitCompletingShards = new HashMap<>(); - for (int j = 0; j < 10; j++) { - List activeAndNotInProgress = getActiveAndNotInProgressShards(metadata); - if (activeAndNotInProgress.isEmpty()) { - break; - } - builder = new SplitShardsMetadata.Builder(metadata); - int splittingShard = activeAndNotInProgress.get(randomIntBetween(0, activeAndNotInProgress.size() - 1)); - - int numberOfChildren = randomIntBetween(1, 50); - List childShardRanges; - try { - childShardRanges = builder.splitShard(splittingShard, numberOfChildren); - } catch (Exception ex) { - if (ex.getMessage().equals("Cannot split shard [" + splittingShard + "] further.")) { - break; - } - throw ex; - } - Set childShardIds = new HashSet<>(); - childShardRanges.forEach(shardRange -> childShardIds.add(shardRange.shardId())); - validateShardsInExistingAndNewMetadata(metadata, builder.build(), new HashSet<>(), -1); - if (childShardRanges.size() != childShardIds.size()) { - builder = new SplitShardsMetadata.Builder(metadata); - builder.splitShard(splittingShard, numberOfChildren); - } - assertEquals(childShardRanges.size(), childShardIds.size()); - ensureUniqueChildShards(metadata, childShardIds, builder.build()); - metadata = builder.build(); - - boolean complete = randomBoolean(); - builder = new SplitShardsMetadata.Builder(metadata); - boolean cancelSplit = randomBoolean(); - if (complete) { - if (cancelSplit) { - builder.cancelSplit(splittingShard); - validateShardsInExistingAndNewMetadata(metadata, builder.build(), new HashSet<>(), -1); - metadata = builder.build(); - builder = new SplitShardsMetadata.Builder(metadata); - builder.splitShard(splittingShard, childShardIds.size()); - builder.updateSplitMetadataForChildShards(splittingShard, childShardIds); - validateShardsInExistingAndNewMetadata(metadata, builder.build(), childShardIds, splittingShard); - metadata = builder.build(); - } else { - builder.updateSplitMetadataForChildShards(splittingShard, childShardIds); - validateShardsInExistingAndNewMetadata(metadata, builder.build(), childShardIds, splittingShard); - metadata = builder.build(); - } - } else { - if (cancelSplit) { - cancellableShards.add(splittingShard); - } else { - splitCompletingShards.put(splittingShard, childShardIds); - } - } - } - finishPendingSplits(cancellableShards, splitCompletingShards, metadata); - } - } - - private void finishPendingSplits( - Set cancellableShards, - Map> splitCompletingShards, - SplitShardsMetadata metadata - ) { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(metadata); - for (Integer shardId : cancellableShards) { - builder.cancelSplit(shardId); - validateShardsInExistingAndNewMetadata(metadata, builder.build(), new HashSet<>(), -1); - metadata = builder.build(); - } - - for (Integer shardId : splitCompletingShards.keySet()) { - builder = new SplitShardsMetadata.Builder(metadata); - builder.updateSplitMetadataForChildShards(shardId, splitCompletingShards.get(shardId)); -
validateShardsInExistingAndNewMetadata(metadata, builder.build(), splitCompletingShards.get(shardId), shardId); - metadata = builder.build(); - } - } - - private List getActiveAndNotInProgressShards(SplitShardsMetadata metadata) { - List result = new ArrayList<>(); - Iterator shardIterator = metadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - int shardId = shardIterator.next(); - if (metadata.getInProgressSplitShardIds().contains(shardId) == false) { - result.add(shardId); - } - } - return result; - } - - private void ensureUniqueChildShards(SplitShardsMetadata previousMetadata, Set childShards, SplitShardsMetadata newMetadata) { - Set existingShards = new HashSet<>(); - Iterator shardIterator = previousMetadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - existingShards.add(shardIterator.next()); - } - childShards.forEach(shard -> assertFalse(existingShards.contains(shard))); - - previousMetadata.getInProgressSplitShardIds().forEach(shardId -> assertFalse(childShards.contains(shardId))); - } - - private void validateShardsInExistingAndNewMetadata( - SplitShardsMetadata existing, - SplitShardsMetadata newMetadata, - Set newShards, - int splittingShard - ) { - Set existingShards = new HashSet<>(); - Iterator shardIterator = existing.getActiveShardIterator(); - while (shardIterator.hasNext()) { - existingShards.add(shardIterator.next()); - } - existingShards.remove(splittingShard); - - Set shardsFromNewMetadata = new HashSet<>(); - shardIterator = newMetadata.getActiveShardIterator(); - while (shardIterator.hasNext()) { - shardsFromNewMetadata.add(shardIterator.next()); - } - - assertTrue(shardsFromNewMetadata.containsAll(existingShards)); - if (newShards.isEmpty()) { - assertEquals(existingShards.size(), shardsFromNewMetadata.size()); - } else { - assertTrue(shardsFromNewMetadata.containsAll(newShards)); - assertEquals(existingShards.size() + newShards.size(), shardsFromNewMetadata.size()); - assertFalse(newMetadata.getInProgressSplitShardIds().contains(splittingShard)); - ShardRange[] shardRanges = newMetadata.getChildShardsOfParent(splittingShard); - assertEquals(shardRanges.length, newShards.size()); - for (ShardRange shardRange : shardRanges) { - assertTrue(newShards.contains(shardRange.shardId())); - } - } - } - - public void testSplitShardWithCompletedSplit() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(5); - builder.splitShard(0, 2); - builder.updateSplitMetadataForChildShards(0, Set.of(5, 6)); - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { builder.splitShard(0, 2); }); - assertTrue(exception.getMessage().startsWith("Split of shard [0] is already in progress or completed.")); - } - - public void testSplitShardWithInvalidNumberOfChildren() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> { builder.splitShard(0, 10000000); }); - assertTrue(exception.getMessage().contains("Cannot split shard [0] further.")); - } - - public void testSplitInvalidShardId() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); - IllegalArgumentException exception = assertThrows( - IllegalArgumentException.class, - () -> builder.splitShard(10, 2) // Invalid shard ID - ); - assertTrue(exception.getMessage().contains("Shard ID doesn't exist in the current list of shard ranges")); - } - - public void testSplitShardBasicOperation() { - 
SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); - // Test basic split operation - builder.splitShard(0, 2); - SplitShardsMetadata metadata = builder.build(); - - // Verify split state - assertTrue(metadata.isSplitOfShardInProgress(0)); - ShardRange[] childShards = metadata.getChildShardsOfParent(0); - assertNotNull(childShards); - assertEquals(2, childShards.length); - - // Verify shard ranges - assertTrue(childShards[0].end() < childShards[1].start()); - assertEquals(childShards[0].end() + 1, childShards[1].start()); - } - - public void testSplitShardNestedSplit() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); // Start with 1 root shard - - // First split - split root shard 0 into two children (shards 1 and 2) - builder.splitShard(0, 2); - builder.updateSplitMetadataForChildShards(0, Set.of(1, 2)); - - // Verify first split - SplitShardsMetadata metadata = builder.build(); - ShardRange[] firstLevelShards = metadata.getChildShardsOfParent(0); - assertNotNull("First level split should create child shards", firstLevelShards); - assertEquals("Should have 2 child shards", 2, firstLevelShards.length); - - // Second split - split first child shard (shard 1) into three children - builder.splitShard(1, 3); - builder.updateSplitMetadataForChildShards(1, Set.of(3, 4, 5)); - - // Get final metadata - metadata = builder.build(); - - // Verify the nested split results - ShardRange[] secondLevelShards = metadata.getChildShardsOfParent(1); - assertNotNull("Second level split should create child shards", secondLevelShards); - assertEquals("Should have 3 child shards", 3, secondLevelShards.length); - - assertTrue("Original shard 0 should be split", metadata.isSplitParent(0)); - assertTrue("Child shard 1 should be split", metadata.isSplitParent(1)); - assertFalse("Child shard 2 should not be split", metadata.isSplitParent(2)); - - // Verify ranges are properly distributed - for (int i = 0; i < secondLevelShards.length - 1; i++) { - assertTrue("Shard ranges should be in order", secondLevelShards[i].end() < secondLevelShards[i + 1].start()); - assertEquals("Shard ranges should be contiguous", secondLevelShards[i].end() + 1, secondLevelShards[i + 1].start()); - } - - // Verify range boundaries - ShardRange parentRange = firstLevelShards[0]; // Range of shard 1 - assertEquals("First child should start at parent's start", parentRange.start(), secondLevelShards[0].start()); - assertEquals("Last child should end at parent's end", parentRange.end(), secondLevelShards[secondLevelShards.length - 1].end()); - - // Test hash routing through the nested structure - long childShardRangeDiff = Math.abs((long) parentRange.end() - parentRange.start() + 1) / 3; - int hashInFirstThird = parentRange.start() + (int) childShardRangeDiff - 1; - - assertEquals("Hash should route to first child of nested split", 3, metadata.getShardIdOfHash(0, hashInFirstThird, true)); - } - - public void testHashCodeAndEquals() { - assertEquals(createDummySplitShardsMetadataWithNoRandomisation(), createDummySplitShardsMetadataWithNoRandomisation()); - assertEquals( - createDummySplitShardsMetadataWithNoRandomisation().hashCode(), - createDummySplitShardsMetadataWithNoRandomisation().hashCode() - ); - } - - private SplitShardsMetadata createDummySplitShardsMetadataWithNoRandomisation() { - - int numberOfRootShards = 2; - ShardRange[][] rootToChildShards = new ShardRange[numberOfRootShards][]; - Map parentToChildShards = new HashMap<>(); - rootToChildShards[0] = new ShardRange[2]; - 
rootToChildShards[0][0] = new ShardRange(numberOfRootShards, 0, 100); - rootToChildShards[0][1] = new ShardRange(numberOfRootShards + 1, 0, 100); - - // put details for already split shard into parent-to-child map - parentToChildShards.put(0, new ShardRange[] { rootToChildShards[0][0], rootToChildShards[0][1] }); - - // put details for splitting shard into parent-to-child map - int inProgressSplitShardId = 1; - Set inProgressSplitShards = Set.of(0); - Set activeShards = Set.of(0); - parentToChildShards.put(inProgressSplitShardId, new ShardRange[2]); - parentToChildShards.get(inProgressSplitShardId)[0] = new ShardRange(numberOfRootShards + 2, 0, 100); - parentToChildShards.get(inProgressSplitShardId)[1] = new ShardRange(numberOfRootShards + 3, 101, 200); - - return new SplitShardsMetadata(rootToChildShards, parentToChildShards, inProgressSplitShards, activeShards, 0); - } - - public void testUpdateSplitMetadataWithInvalidSourceShardId() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); - Set newChildShardIds = new HashSet<>(); - newChildShardIds.add(1); - newChildShardIds.add(2); - assertThrows(IllegalArgumentException.class, () -> { builder.updateSplitMetadataForChildShards(1, newChildShardIds); }); - } - - public void testUpdateSplitMetadataForChildShards_NoSplitInProgress() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(1); - List childShards = builder.splitShard(0, 2); - builder.cancelSplit(0); - - Set newChildShardIds = new HashSet<>(); - childShards.forEach(shard -> newChildShardIds.add(shard.shardId())); - assertThrows(AssertionError.class, () -> { builder.updateSplitMetadataForChildShards(0, newChildShardIds); }); - } - - public void testUpdateSplitMetadataForChildShards_InvalidChildShardId() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(5); - builder.splitShard(0, 2); - Set newChildShardIds = new HashSet<>(); - newChildShardIds.add(5); - newChildShardIds.add(7); - - assertThrows(AssertionError.class, () -> { builder.updateSplitMetadataForChildShards(0, newChildShardIds); }); - } - - public void testUpdateSplitMetadataForChildShards_MismatchedChildShardCount() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(5); - builder.splitShard(0, 2); - Set newChildShardIds = new HashSet<>(); - newChildShardIds.add(5); - - assertThrows(AssertionError.class, () -> { builder.updateSplitMetadataForChildShards(0, newChildShardIds); }); - } - - public void testUpdateSplitMetadataForChildShards_EmptyNewChildShardIds() { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(5); - builder.splitShard(0, 2); - Set newChildShardIds = new HashSet<>(); - - assertThrows(AssertionError.class, () -> { builder.updateSplitMetadataForChildShards(0, newChildShardIds); }); - } - - /** - * Test case for updateSplitMetadataForChildShards method - * Verifies that the method correctly updates the metadata for child shards - */ - public void testUpdateSplitMetadataForChildShards_Success() { - // Arrange - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(2); - int sourceShardId = 0; - builder.splitShard(sourceShardId, 2); - Set newChildShardIds = new HashSet<>(Arrays.asList(2, 3)); - - // Act - builder.updateSplitMetadataForChildShards(sourceShardId, newChildShardIds); - SplitShardsMetadata metadata = builder.build(); - - // Assert - assertTrue(metadata.getInProgressSplitShardIds().isEmpty()); - assertEquals(3, metadata.getNumberOfShards()); - - ShardRange[] childShards = 
metadata.getChildShardsOfParent(sourceShardId); - assertNotNull(childShards); - assertEquals(2, childShards.length); - assertTrue(newChildShardIds.contains(childShards[0].shardId())); - assertTrue(newChildShardIds.contains(childShards[1].shardId())); - - } - - public void testReadDiffFromWithEmptyInput() throws IOException { - StreamInput emptyInput = Mockito.mock(StreamInput.class); - when(emptyInput.available()).thenReturn(0); - assertNotNull(SplitShardsMetadata.readDiffFrom(emptyInput)); - } - - public void testEqualsMethod() { - // Test 1: Same object reference - SplitShardsMetadata metadata = new SplitShardsMetadata.Builder(1).build(); - assertTrue("Same object should be equal to itself", metadata.equals(metadata)); - - // Test 2: Null comparison - assertFalse("Object should not be equal to null", metadata.equals(null)); - - // Test 3: Different class - assertFalse("Should not be equal to different class", metadata.equals(new Object())); - - // Test 4: Equal objects - SplitShardsMetadata metadata1 = new SplitShardsMetadata.Builder(2).build(); - SplitShardsMetadata metadata2 = new SplitShardsMetadata.Builder(2).build(); - assertTrue("Two equivalent objects should be equal", metadata1.equals(metadata2)); - - // Test 5: Different maxShardId - SplitShardsMetadata differentMaxShardId = new SplitShardsMetadata.Builder(3).build(); - assertFalse("Objects with different maxShardId should not be equal", metadata1.equals(differentMaxShardId)); - - // Test 6: Different inProgressSplitShardId - SplitShardsMetadata.Builder builder1 = new SplitShardsMetadata.Builder(2); - builder1.splitShard(0, 2); // This will set inProgressSplitShardId - SplitShardsMetadata withSplit = builder1.build(); - assertFalse("Objects with different inProgressSplitShardId should not be equal", metadata1.equals(withSplit)); - - // Test 7: Different rootShardsToAllChildren - SplitShardsMetadata.Builder builder2 = new SplitShardsMetadata.Builder(2); - builder2.splitShard(0, 2); - builder2.updateSplitMetadataForChildShards(0, Set.of(2, 3)); - SplitShardsMetadata withDifferentRootShards = builder2.build(); - assertFalse("Objects with different rootShardsToAllChildren should not be equal", metadata1.equals(withDifferentRootShards)); - - // Test 8: Different parentToChildShards - SplitShardsMetadata.Builder builder3 = new SplitShardsMetadata.Builder(2); - builder3.splitShard(1, 2); // Split different shard - builder3.updateSplitMetadataForChildShards(1, Set.of(2, 3)); - SplitShardsMetadata withDifferentParentToChild = builder3.build(); - assertFalse( - "Objects with different parentToChildShards should not be equal", - withDifferentRootShards.equals(withDifferentParentToChild) - ); - - // Test 9: Symmetric equality - assertTrue("Equality should be symmetric", metadata1.equals(metadata2) && metadata2.equals(metadata1)); - - // Test 10: Transitive equality - SplitShardsMetadata metadata3 = new SplitShardsMetadata.Builder(2).build(); - assertTrue( - "Equality should be transitive", - metadata1.equals(metadata2) && metadata2.equals(metadata3) && metadata1.equals(metadata3) - ); - } - - public void testEqualsWithNullArrayElements() { - // Create metadata with null elements in rootShardsToAllChildren - SplitShardsMetadata.Builder builder1 = new SplitShardsMetadata.Builder(3); - SplitShardsMetadata metadata1 = builder1.build(); - - SplitShardsMetadata.Builder builder2 = new SplitShardsMetadata.Builder(3); - SplitShardsMetadata metadata2 = builder2.build(); - - assertTrue("Objects with null array elements should be equal if structure is 
same", metadata1.equals(metadata2)); - } - - public void testEqualsWithEmptyMaps() { - // Create metadata with empty parentToChildShards maps - SplitShardsMetadata.Builder builder1 = new SplitShardsMetadata.Builder(1); - SplitShardsMetadata metadata1 = builder1.build(); - - SplitShardsMetadata.Builder builder2 = new SplitShardsMetadata.Builder(1); - SplitShardsMetadata metadata2 = builder2.build(); - - assertTrue("Objects with empty maps should be equal if structure is same", metadata1.equals(metadata2)); - } - - public void testValidateShardRangesOverlap() { - // Test overlapping ranges - ShardRange[] overlappingRanges = new ShardRange[] { - new ShardRange(0, Integer.MIN_VALUE, 100), - new ShardRange(0, 50, Integer.MAX_VALUE) // Overlaps with previous range at 50-100 - }; - - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> validateShardRanges(0, overlappingRanges)); - assertTrue(exception.getMessage().contains("Shard range overlap")); - } - - public void testValidateShardRangesBelowThreshold() { - // Test range below threshold - ShardRange[] smallRange = new ShardRange[] { new ShardRange(0, Integer.MIN_VALUE, Integer.MIN_VALUE + 10) }; - - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> validateShardRanges(0, smallRange)); - assertTrue(exception.getMessage().contains("below shard range threshold")); - } - - public void testValidateShardRangesMissingRange() { - // Test missing range - ShardRange[] missingRange = new ShardRange[] { new ShardRange(0, Integer.MIN_VALUE, 100), new ShardRange(0, 200, Integer.MAX_VALUE) // Missing - // range - // at - // 101-199 - }; - - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> validateShardRanges(0, missingRange)); - assertTrue(exception.getMessage().contains("Shard range from 101 to 199 is missing from the list of shard ranges")); - } - - public void testValidateShardRangesInvalidStartRange() { - // test shard range not at Integer.Min - ShardRange[] missingRange = new ShardRange[] { - new ShardRange(0, Integer.MIN_VALUE + 1, 100), - new ShardRange(0, 100, Integer.MAX_VALUE) // Missing range at 101-199 - }; - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> validateShardRanges(0, missingRange)); - assertTrue(exception.getMessage().contains("Shard range from -2147483648 to -2147483648 is missing from the list of shard ranges")); - - // Empty shard range - IllegalArgumentException exception2 = assertThrows(IllegalArgumentException.class, () -> validateShardRanges(0, new ShardRange[0])); - assertTrue(exception2.getMessage().contains("No shard range defined for child shards of shard 0")); - } - - public void testStreamSerdeNoSplit() throws IOException { - SplitShardsMetadata original = new SplitShardsMetadata.Builder(5).build(); - SplitShardsMetadata deserialized = streamRoundTrip(original); - assertEquals(original, deserialized); - } - - public void testStreamSerdeSplitInProgress() throws IOException { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); - builder.splitShard(0, 3); - SplitShardsMetadata original = builder.build(); - SplitShardsMetadata deserialized = streamRoundTrip(original); - assertEquals(original, deserialized); - } - - public void testStreamSerdeSplitCompleted() throws IOException { - SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3); - builder.splitShard(0, 3); - builder.updateSplitMetadataForChildShards(0, Set.of(3, 4, 5)); - 
-        SplitShardsMetadata original = builder.build();
-        SplitShardsMetadata deserialized = streamRoundTrip(original);
-        assertEquals(original, deserialized);
-    }
-
-    public void testStreamSerdeConsecutiveSplits() throws IOException {
-        SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3);
-        builder.splitShard(0, 3);
-        builder.updateSplitMetadataForChildShards(0, Set.of(3, 4, 5));
-        builder.splitShard(3, 2);
-        SplitShardsMetadata original = builder.build();
-        SplitShardsMetadata deserialized = streamRoundTrip(original);
-        assertEquals(original, deserialized);
-    }
-
-    public void testXContentSerdeNoSplit() throws IOException {
-        SplitShardsMetadata original = new SplitShardsMetadata.Builder(5).build();
-        SplitShardsMetadata deserialized = xContentRoundTrip(original);
-        assertEquals(original, deserialized);
-    }
-
-    public void testXContentSerdeSplitInProgress() throws IOException {
-        SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3);
-        builder.splitShard(0, 3);
-        SplitShardsMetadata original = builder.build();
-        SplitShardsMetadata deserialized = xContentRoundTrip(original);
-        assertEquals(original, deserialized);
-    }
-
-    public void testXContentSerdeSplitCompleted() throws IOException {
-        SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3);
-        builder.splitShard(0, 3);
-        builder.updateSplitMetadataForChildShards(0, Set.of(3, 4, 5));
-        SplitShardsMetadata original = builder.build();
-        SplitShardsMetadata deserialized = xContentRoundTrip(original);
-        assertEquals(original, deserialized);
-    }
-
-    public void testXContentSerdeConsecutiveSplits() throws IOException {
-        SplitShardsMetadata.Builder builder = new SplitShardsMetadata.Builder(3);
-        builder.splitShard(0, 3);
-        builder.updateSplitMetadataForChildShards(0, Set.of(3, 4, 5));
-        builder.splitShard(3, 2);
-        SplitShardsMetadata original = builder.build();
-        SplitShardsMetadata deserialized = xContentRoundTrip(original);
-        assertEquals(original, deserialized);
-    }
-
-    public void testDiffSerde() throws IOException {
-        SplitShardsMetadata before = new SplitShardsMetadata.Builder(3).build();
-        SplitShardsMetadata.Builder afterBuilder = new SplitShardsMetadata.Builder(3);
-        afterBuilder.splitShard(0, 3);
-        SplitShardsMetadata after = afterBuilder.build();
-
-        Diff<SplitShardsMetadata> diff = after.diff(before);
-        BytesStreamOutput out = new BytesStreamOutput();
-        diff.writeTo(out);
-        try (StreamInput in = out.bytes().streamInput()) {
-            Diff<SplitShardsMetadata> deserializedDiff = SplitShardsMetadata.readDiffFrom(in);
-            SplitShardsMetadata applied = deserializedDiff.apply(before);
-            assertEquals(after, applied);
-        }
-    }
-
-    private SplitShardsMetadata streamRoundTrip(SplitShardsMetadata original) throws IOException {
-        BytesStreamOutput out = new BytesStreamOutput();
-        original.writeTo(out);
-        try (StreamInput in = out.bytes().streamInput()) {
-            return new SplitShardsMetadata(in);
-        }
-    }
-
-    private SplitShardsMetadata xContentRoundTrip(SplitShardsMetadata original) throws IOException {
-        XContentBuilder builder = JsonXContent.contentBuilder();
-        builder.startObject();
-        original.toXContent(builder, ToXContent.EMPTY_PARAMS);
-        builder.endObject();
-        try (XContentParser parser = createParser(JsonXContent.jsonXContent, BytesReference.bytes(builder))) {
-            return SplitShardsMetadata.parse(parser);
-        }
-    }
-
-}
diff --git a/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java b/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java
index 702a30ed09760..aee76b0c8c599 100644
--- a/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java
+++ b/server/src/test/java/org/opensearch/cluster/metadata/ToAndFromJsonMetadataTests.java
@@ -358,9 +358,6 @@ public void testToXContentAPI_SameTypeName() throws IOException {
             + "        \"primary_terms\" : {\n"
             + "          \"0\" : 1\n"
             + "        },\n"
-            + "        \"primary_terms_map\" : {\n"
-            + "          \"0\" : 1\n"
-            + "        },\n"
             + "        \"in_sync_allocations\" : {\n"
             + "          \"0\" : [ ]\n"
             + "        },\n"
@@ -368,15 +365,6 @@
             + "        \"system\" : false,\n"
             + "        \"ingestion_status\" : {\n"
             + "          \"is_paused\" : false\n"
-            + "        },\n"
-            + "        \"split_shards_metadata\" : {\n"
-            + "          \"num_of_root_shards\" : 1,\n"
-            + "          \"max_shard_id\" : 0,\n"
-            + "          \"active_shard_ids\" : [\n"
-            + "            0\n"
-            + "          ],\n"
-            + "          \"root_shards_to_all_children\" : { },\n"
-            + "          \"parent_to_child_shards\" : { }\n"
             + "        }\n"
             + "      }\n"
             + "    },\n"
@@ -541,9 +529,6 @@ public void testToXContentAPI_FlatSettingTrue_ReduceMappingFalse() throws IOExce
             + "        \"primary_terms\" : {\n"
             + "          \"0\" : 1\n"
             + "        },\n"
-            + "        \"primary_terms_map\" : {\n"
-            + "          \"0\" : 1\n"
-            + "        },\n"
             + "        \"in_sync_allocations\" : {\n"
             + "          \"0\" : [\n"
             + "            \"allocationId\"\n"
@@ -558,15 +543,6 @@ public void testToXContentAPI_FlatSettingTrue_ReduceMappingFalse() throws IOExce
             + "        \"system\" : false,\n"
             + "        \"ingestion_status\" : {\n"
             + "          \"is_paused\" : false\n"
-            + "        },\n"
-            + "        \"split_shards_metadata\" : {\n"
-            + "          \"num_of_root_shards\" : 1,\n"
-            + "          \"max_shard_id\" : 0,\n"
-            + "          \"active_shard_ids\" : [\n"
-            + "            0\n"
-            + "          ],\n"
-            + "          \"root_shards_to_all_children\" : { },\n"
-            + "          \"parent_to_child_shards\" : { }\n"
             + "        }\n"
             + "      }\n"
             + "    },\n"
@@ -667,9 +643,6 @@ public void testToXContentAPI_FlatSettingFalse_ReduceMappingTrue() throws IOExce
             + "        \"primary_terms\" : {\n"
             + "          \"0\" : 1\n"
             + "        },\n"
-            + "        \"primary_terms_map\" : {\n"
-            + "          \"0\" : 1\n"
-            + "        },\n"
             + "        \"in_sync_allocations\" : {\n"
             + "          \"0\" : [\n"
             + "            \"allocationId\"\n"
@@ -684,15 +657,6 @@ public void testToXContentAPI_FlatSettingFalse_ReduceMappingTrue() throws IOExce
             + "        \"system\" : false,\n"
             + "        \"ingestion_status\" : {\n"
             + "          \"is_paused\" : false\n"
-            + "        },\n"
-            + "        \"split_shards_metadata\" : {\n"
-            + "          \"num_of_root_shards\" : 1,\n"
-            + "          \"max_shard_id\" : 0,\n"
-            + "          \"active_shard_ids\" : [\n"
-            + "            0\n"
-            + "          ],\n"
-            + "          \"root_shards_to_all_children\" : { },\n"
-            + "          \"parent_to_child_shards\" : { }\n"
             + "        }\n"
             + "      }\n"
             + "    },\n"