diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index 562f752b82220..55a1aca166fcc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -83,7 +83,7 @@ public void testRepositoryCreation() throws Exception { .setMetadata(true) .get(); Metadata metadata = clusterStateResponse.getState().getMetadata(); - RepositoriesMetadata repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE); + RepositoriesMetadata repositoriesMetadata = metadata.getProject().custom(RepositoriesMetadata.TYPE); assertThat(repositoriesMetadata, notNullValue()); assertThat(repositoriesMetadata.repository("test-repo-1"), notNullValue()); assertThat(repositoriesMetadata.repository("test-repo-1").type(), equalTo("fs")); @@ -94,7 +94,7 @@ public void testRepositoryCreation() throws Exception { logger.info("--> check that both repositories are in cluster state"); clusterStateResponse = client.admin().cluster().prepareState(TEST_REQUEST_TIMEOUT).clear().setMetadata(true).get(); metadata = clusterStateResponse.getState().getMetadata(); - repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE); + repositoriesMetadata = metadata.getProject().custom(RepositoriesMetadata.TYPE); assertThat(repositoriesMetadata, notNullValue()); assertThat(repositoriesMetadata.repositories().size(), equalTo(2)); assertThat(repositoriesMetadata.repository("test-repo-1"), notNullValue()); diff --git a/server/src/main/java/org/elasticsearch/TransportVersions.java b/server/src/main/java/org/elasticsearch/TransportVersions.java index fcddbbdae8efa..d086fb79167fe 100644 --- a/server/src/main/java/org/elasticsearch/TransportVersions.java +++ b/server/src/main/java/org/elasticsearch/TransportVersions.java @@ -207,6 +207,7 @@ static TransportVersion 
def(int id) { public static final TransportVersion RESCORE_VECTOR_ALLOW_ZERO = def(9_039_0_00); public static final TransportVersion PROJECT_ID_IN_SNAPSHOT = def(9_040_0_00); public static final TransportVersion INDEX_STATS_AND_METADATA_INCLUDE_PEAK_WRITE_LOAD = def(9_041_0_00); + public static final TransportVersion REPOSITORIES_METADATA_AS_PROJECT_CUSTOM = def(9_042_0_00); /* * STOP! READ THIS FIRST! No, really, diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index c5fd400070936..6aab5bf0fb4d1 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -226,7 +226,7 @@ public static List getNamedWriteables() { RepositoryCleanupInProgress::readDiffFrom ); // Metadata - registerMetadataCustom(entries, RepositoriesMetadata.TYPE, RepositoriesMetadata::new, RepositoriesMetadata::readDiffFrom); + registerProjectCustom(entries, RepositoriesMetadata.TYPE, RepositoriesMetadata::new, RepositoriesMetadata::readDiffFrom); registerProjectCustom(entries, IngestMetadata.TYPE, IngestMetadata::new, IngestMetadata::readDiffFrom); registerProjectCustom(entries, ScriptMetadata.TYPE, ScriptMetadata::new, ScriptMetadata::readDiffFrom); registerProjectCustom(entries, IndexGraveyard.TYPE, IndexGraveyard::new, IndexGraveyard::readDiffFrom); @@ -283,7 +283,7 @@ public static List getNamedXWriteables() { // Metadata entries.add( new NamedXContentRegistry.Entry( - Metadata.ClusterCustom.class, + Metadata.ProjectCustom.class, new ParseField(RepositoriesMetadata.TYPE), RepositoriesMetadata::fromXContent ) diff --git a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java index c1964881ca0af..542f6879ffd41 100644 --- a/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java +++ 
b/server/src/main/java/org/elasticsearch/cluster/DiffableUtils.java @@ -27,6 +27,7 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.function.BiFunction; import java.util.function.Consumer; import java.util.function.Function; import java.util.function.Predicate; @@ -98,6 +99,18 @@ public static > MapDiff emptyDiff() { return (MapDiff) EMPTY; } + /** + * Merges two map diffs into one unified diff with write-only value serializer. + */ + @SuppressWarnings("unchecked") + public static , T1 extends T, T2 extends T, M extends Map> MapDiff merge( + MapDiff> diff1, + MapDiff> diff2, + KeySerializer keySerializer + ) { + return merge(diff1, diff2, keySerializer, DiffableValueSerializer.getWriteOnlyInstance()); + } + /** * Merges two map diffs into one unified diff. */ @@ -146,6 +159,39 @@ public static > boolean hasKey(MapDiff diff, return false; } + /** + * Create a new MapDiff from the specified MapDiff by transforming its diffs with the provided diffUpdateFunction as well as + * transforming its upserts with the provided upsertUpdateFunction. Whether an entry should be transformed is determined by + * the specified keyPredicate. 
+ * @param diff The original MapDiff + * @param keyPredicate Determines whether an entry should be transformed + * @param diffUpdateFunction A function to transform a Diff entry + * @param upsertUpdateFunction A function to transform an upsert entry + * @return A new MapDiff as a result of the transformation + */ + public static > MapDiff updateDiffsAndUpserts( + MapDiff diff, + Predicate keyPredicate, + BiFunction, Diff> diffUpdateFunction, + BiFunction upsertUpdateFunction + ) { + final var newDiffs = diff.getDiffs().stream().map(entry -> { + if (keyPredicate.test(entry.getKey()) == false) { + return entry; + } + return Map.entry(entry.getKey(), diffUpdateFunction.apply(entry.getKey(), entry.getValue())); + }).toList(); + + final var newUpserts = diff.getUpserts().stream().map(entry -> { + if (keyPredicate.test(entry.getKey()) == false) { + return entry; + } + return Map.entry(entry.getKey(), upsertUpdateFunction.apply(entry.getKey(), entry.getValue())); + }).toList(); + + return new MapDiff<>(diff.keySerializer, diff.valueSerializer, diff.deletes, newDiffs, newUpserts, diff.builderCtor); + } + /** * Create a new JDK map backed MapDiff by transforming the keys with the provided keyFunction. 
* @param diff Original MapDiff to transform diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 33fe2ad01c9dd..3155604088fae 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -11,6 +11,7 @@ import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; +import org.apache.lucene.util.SetOnce; import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diff; @@ -54,6 +55,7 @@ import java.io.IOException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.EnumSet; import java.util.HashMap; @@ -952,11 +954,13 @@ private MetadataDiff(StreamInput in) throws IOException { multiProject = null; } else { fromNodeBeforeMultiProjectsSupport = false; - clusterCustoms = DiffableUtils.readImmutableOpenMapDiff( - in, - DiffableUtils.getStringKeySerializer(), - CLUSTER_CUSTOM_VALUE_SERIALIZER - ); + // Repositories metadata is sent as Metadata#customs diff from old node. We need to + // 1. Split it from the Metadata#customs diff + // 2. 
Merge it into the default project's ProjectMetadataDiff + final var bwcCustoms = maybeReadBwcCustoms(in); + clusterCustoms = bwcCustoms.v1(); + final var defaultProjectCustoms = bwcCustoms.v2(); + reservedStateMetadata = DiffableUtils.readImmutableOpenMapDiff( in, DiffableUtils.getStringKeySerializer(), @@ -964,15 +968,61 @@ private MetadataDiff(StreamInput in) throws IOException { ); singleProject = null; - multiProject = DiffableUtils.readJdkMapDiff( - in, - PROJECT_ID_SERIALIZER, - ProjectMetadata::readFrom, - ProjectMetadata.ProjectMetadataDiff::new + multiProject = readMultiProjectDiffs(in, defaultProjectCustoms); + } + } + + private static + Tuple< + MapDiff>, + MapDiff>> + maybeReadBwcCustoms(StreamInput in) throws IOException { + if (in.getTransportVersion().before(TransportVersions.REPOSITORIES_METADATA_AS_PROJECT_CUSTOM)) { + return readBwcCustoms(in); + } else { + return new Tuple<>( + DiffableUtils.readImmutableOpenMapDiff(in, DiffableUtils.getStringKeySerializer(), CLUSTER_CUSTOM_VALUE_SERIALIZER), + null ); } } + @SuppressWarnings("unchecked") + private static MapDiff> readMultiProjectDiffs( + StreamInput in, + MapDiff> defaultProjectCustoms + ) throws IOException { + final var multiProject = DiffableUtils.readJdkMapDiff( + in, + PROJECT_ID_SERIALIZER, + ProjectMetadata::readFrom, + ProjectMetadata.ProjectMetadataDiff::new + ); + + // If the defaultProjectCustoms has content, the diff is read from an old node. 
We need to merge it into the + // default project's ProjectMetadataDiff + if (defaultProjectCustoms != null && defaultProjectCustoms.isEmpty() == false) { + return DiffableUtils.updateDiffsAndUpserts(multiProject, ProjectId.DEFAULT::equals, (k, v) -> { + assert ProjectId.DEFAULT.equals(k) : k; + assert v instanceof ProjectMetadata.ProjectMetadataDiff : v; + final var projectMetadataDiff = (ProjectMetadata.ProjectMetadataDiff) v; + return projectMetadataDiff.withCustoms( + DiffableUtils.merge( + projectMetadataDiff.customs(), + defaultProjectCustoms, + DiffableUtils.getStringKeySerializer(), + BWC_CUSTOM_VALUE_SERIALIZER + ) + ); + }, (k, v) -> { + assert ProjectId.DEFAULT.equals(k) : k; + return ProjectMetadata.builder(v).clearCustoms().customs(defaultProjectCustoms.apply(v.customs())).build(); + }); + } else { + return multiProject; + } + } + @SuppressWarnings("unchecked") private static Tuple< @@ -1016,15 +1066,103 @@ public void writeTo(StreamOutput out) throws IOException { buildUnifiedCustomDiff().writeTo(out); buildUnifiedReservedStateMetadataDiff().writeTo(out); } else { - clusterCustoms.writeTo(out); - reservedStateMetadata.writeTo(out); - if (multiProject != null) { - multiProject.writeTo(out); + final var multiProjectToWrite = multiProject != null + ? 
multiProject + : DiffableUtils.singleEntryDiff(DEFAULT_PROJECT_ID, singleProject, PROJECT_ID_SERIALIZER); + + if (out.getTransportVersion().before(TransportVersions.REPOSITORIES_METADATA_AS_PROJECT_CUSTOM)) { + writeDiffWithRepositoriesMetadataAsClusterCustom(out, clusterCustoms, multiProjectToWrite, reservedStateMetadata); } else { - // construct the MapDiff to write out this single project - DiffableUtils.singleEntryDiff(DEFAULT_PROJECT_ID, singleProject, PROJECT_ID_SERIALIZER).writeTo(out); + clusterCustoms.writeTo(out); + reservedStateMetadata.writeTo(out); + multiProjectToWrite.writeTo(out); + } + } + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + private static void writeDiffWithRepositoriesMetadataAsClusterCustom( + StreamOutput out, + MapDiff> clusterCustoms, + MapDiff> multiProject, + MapDiff> reservedStateMetadata + ) throws IOException { + assert out.getTransportVersion().onOrAfter(TransportVersions.MULTI_PROJECT) + && out.getTransportVersion().before(TransportVersions.REPOSITORIES_METADATA_AS_PROJECT_CUSTOM) : out.getTransportVersion(); + + // For old nodes, RepositoriesMetadata needs to be sent as a cluster custom. This is possible when (a) the repositories + // are defined only for the default project or (b) no repositories at all. What we need to do are: + // 1. Iterate through the multi-project's MapDiff to extract the RepositoriesMetadata of the default project + // 2. Throws if any repositories are found for non-default projects + // 3. 
Merge default project's RepositoriesMetadata into Metadata#customs + final var combineClustersCustoms = new SetOnce>>(); + final var updatedMultiProject = DiffableUtils.updateDiffsAndUpserts(multiProject, ignore -> true, (k, v) -> { + assert v instanceof ProjectMetadata.ProjectMetadataDiff : v; + final var projectMetadataDiff = (ProjectMetadata.ProjectMetadataDiff) v; + final var bwcCustoms = DiffableUtils.split( + projectMetadataDiff.customs(), + RepositoriesMetadata.TYPE::equals, + PROJECT_CUSTOM_VALUE_SERIALIZER, + type -> RepositoriesMetadata.TYPE.equals(type) == false, + PROJECT_CUSTOM_VALUE_SERIALIZER + ); + // Simply return if RepositoriesMetadata is not found + if (bwcCustoms.v1().isEmpty()) { + return projectMetadataDiff; + } + // RepositoriesMetadata can only be defined for the default project. Otherwise throw exception. + if (ProjectId.DEFAULT.equals(k) == false) { + throwForVersionBeforeRepositoriesMetadataMigration(out); + } + // RepositoriesMetadata is found for the default project as a diff, merge it into the Metadata#customs + combineClustersCustoms.set( + DiffableUtils.>merge( + clusterCustoms, + bwcCustoms.v1(), + DiffableUtils.getStringKeySerializer() + ) + ); + return projectMetadataDiff.withCustoms(bwcCustoms.v2()); + }, (k, v) -> { + final ProjectCustom projectCustom = v.customs().get(RepositoriesMetadata.TYPE); + // Simply return if RepositoriesMetadata is not found + if (projectCustom == null) { + return v; + } + // RepositoriesMetadata can only be defined for the default project. Otherwise throw exception. 
+ if (ProjectId.DEFAULT.equals(k) == false) { + throwForVersionBeforeRepositoriesMetadataMigration(out); } + // RepositoriesMetadata found for the default project as an upsert, package it as MapDiff and merge into Metadata#customs + combineClustersCustoms.set( + DiffableUtils.>merge( + clusterCustoms, + DiffableUtils.singleUpsertDiff(RepositoriesMetadata.TYPE, projectCustom, DiffableUtils.getStringKeySerializer()), + DiffableUtils.getStringKeySerializer() + ) + ); + return ProjectMetadata.builder(v).removeCustom(RepositoriesMetadata.TYPE).build(); + }); + + if (combineClustersCustoms.get() != null) { + combineClustersCustoms.get().writeTo(out); + } else { + clusterCustoms.writeTo(out); } + + reservedStateMetadata.writeTo(out); + updatedMultiProject.writeTo(out); + } + + private static void throwForVersionBeforeRepositoriesMetadataMigration(StreamOutput out) { + assert out.getTransportVersion().before(TransportVersions.REPOSITORIES_METADATA_AS_PROJECT_CUSTOM) : out.getTransportVersion(); + throw new UnsupportedOperationException( + "Serialize a diff with repositories defined for multiple projects requires version on or after [" + + TransportVersions.REPOSITORIES_METADATA_AS_PROJECT_CUSTOM + + "], but got [" + + out.getTransportVersion() + + "]" + ); } @SuppressWarnings("unchecked") @@ -1178,7 +1316,17 @@ public static Metadata readFrom(StreamInput in) throws IOException { builder.put(ReservedStateMetadata.readFrom(in)); } } else { - readClusterCustoms(in, builder); + List defaultProjectCustoms = List.of(); + if (in.getTransportVersion().before(TransportVersions.REPOSITORIES_METADATA_AS_PROJECT_CUSTOM)) { + // Extract the default project's repositories metadata from the Metadata#customs from an old node + defaultProjectCustoms = new ArrayList<>(); + readBwcCustoms(in, builder, defaultProjectCustoms::add); + assert defaultProjectCustoms.size() <= 1 + : "expect only a single default project custom for repository metadata, but got " + + 
defaultProjectCustoms.stream().map(ProjectCustom::getWriteableName).toList(); + } else { + readClusterCustoms(in, builder); + } int reservedStateSize = in.readVInt(); for (int i = 0; i < reservedStateSize; i++) { @@ -1186,11 +1334,16 @@ public static Metadata readFrom(StreamInput in) throws IOException { } builder.projectMetadata(in.readMap(ProjectId::readFrom, ProjectMetadata::readFrom)); + defaultProjectCustoms.forEach(c -> builder.getProject(ProjectId.DEFAULT).putCustom(c.getWriteableName(), c)); } return builder.build(); } private static void readBwcCustoms(StreamInput in, Builder builder) throws IOException { + readBwcCustoms(in, builder, projectCustom -> builder.putProjectCustom(projectCustom.getWriteableName(), projectCustom)); + } + + private static void readBwcCustoms(StreamInput in, Builder builder, Consumer projectCustomConsumer) throws IOException { final Set clusterScopedNames = in.namedWriteableRegistry().getReaders(ClusterCustom.class).keySet(); final Set projectScopedNames = in.namedWriteableRegistry().getReaders(ProjectCustom.class).keySet(); final int count = in.readVInt(); @@ -1206,9 +1359,9 @@ private static void readBwcCustoms(StreamInput in, Builder builder) throws IOExc if (custom instanceof PersistentTasksCustomMetadata persistentTasksCustomMetadata) { final var tuple = persistentTasksCustomMetadata.split(); builder.putCustom(tuple.v1().getWriteableName(), tuple.v1()); - builder.putProjectCustom(tuple.v2().getWriteableName(), tuple.v2()); + projectCustomConsumer.accept(tuple.v2()); } else { - builder.putProjectCustom(custom.getWriteableName(), custom); + projectCustomConsumer.accept(custom); } } else { throw new IllegalArgumentException("Unknown custom name [" + name + "]"); @@ -1275,12 +1428,42 @@ public void writeTo(StreamOutput out) throws IOException { combinedMetadata.addAll(singleProject.reservedStateMetadata().values()); out.writeCollection(combinedMetadata); } else { - VersionedNamedWriteable.writeVersionedWriteables(out, 
customs.values()); + if (out.getTransportVersion().before(TransportVersions.REPOSITORIES_METADATA_AS_PROJECT_CUSTOM)) { + if (isSingleProject() || hasNoNonDefaultProjectRepositories(projects().values())) { + // Repositories metadata must be sent as Metadata#customs for old nodes + final List combinedCustoms = new ArrayList<>(customs.size() + 1); + combinedCustoms.addAll(customs.values()); + final ProjectCustom custom = getProject(ProjectId.DEFAULT).custom(RepositoriesMetadata.TYPE); + if (custom != null) { + combinedCustoms.add(custom); + } + VersionedNamedWriteable.writeVersionedWriteables(out, combinedCustoms); + } else { + throw new UnsupportedOperationException( + "Serialize metadata with repositories defined for multiple projects requires version on or after [" + + TransportVersions.REPOSITORIES_METADATA_AS_PROJECT_CUSTOM + + "], but got [" + + out.getTransportVersion() + + "]" + ); + } + } else { + VersionedNamedWriteable.writeVersionedWriteables(out, customs.values()); + } + out.writeCollection(reservedStateMetadata.values()); out.writeMap(projectMetadata, StreamOutput::writeWriteable, StreamOutput::writeWriteable); } } + /** + * @return {@code true} iff no repositories are defined for non-default-projects. 
+ */ + private static boolean hasNoNonDefaultProjectRepositories(Collection projects) { + return projects.stream() + .allMatch(project -> ProjectId.DEFAULT.equals(project.id()) || project.custom(RepositoriesMetadata.TYPE) == null); + } + public static Builder builder() { return new Builder(); } @@ -1524,6 +1707,13 @@ public Builder putCustom(String type, ProjectCustom custom) { return putProjectCustom(type, custom); } + @Deprecated(forRemoval = true) + public Builder putDefaultProjectCustom(String type, ProjectCustom custom) { + assert projectMetadata.containsKey(ProjectId.DEFAULT) : projectMetadata.keySet(); + getProject(ProjectId.DEFAULT).putCustom(type, custom); + return this; + } + public ClusterCustom getCustom(String type) { return customs.get(type); } @@ -1772,7 +1962,10 @@ public static Metadata fromXContent(XContentParser parser) throws IOException { currentFieldName = parser.currentName(); } else if (token == XContentParser.Token.START_ARRAY) { switch (currentFieldName) { - case "projects" -> readProjects(parser, builder); + case "projects" -> { + assert builder.projectMetadata.isEmpty() : "expect empty projectMetadata, but got " + builder.projectMetadata; + readProjects(parser, builder); + } default -> throw new IllegalArgumentException("Unexpected field [" + currentFieldName + "]"); } } else if (token == XContentParser.Token.START_OBJECT) { @@ -1814,7 +2007,21 @@ public static Metadata fromXContent(XContentParser parser) throws IOException { builder.putProjectCustom(PersistentTasksCustomMetadata.TYPE, tuple.v2()); builder.putCustom(ClusterPersistentTasksCustomMetadata.TYPE, tuple.v1()); } else { - builder.putProjectCustom(name, projectCustom); + if (projectCustom instanceof RepositoriesMetadata repositoriesMetadata) { + // Repositories at the top level means it is either + // 1. Serialization from a single project for which we need to create the default project + // 2. Serialization before repositories metadata migration. 
In this case, the metadata may + // contain multiple projects, including the default project, which should be deserialized + // already with readProjects, i.e. no need to create the default project. + final ProjectMetadata.Builder defaultProjectBuilder = builder.getProject(ProjectId.DEFAULT); + if (defaultProjectBuilder == null) { + builder.putProjectCustom(name, projectCustom); + } else { + defaultProjectBuilder.putCustom(RepositoriesMetadata.TYPE, repositoriesMetadata); + } + } else { + builder.putProjectCustom(name, projectCustom); + } } }); } else { diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ProjectMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ProjectMetadata.java index 5b8864309f7e1..8a791333e8639 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ProjectMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ProjectMetadata.java @@ -10,6 +10,7 @@ package org.elasticsearch.cluster.metadata; import org.apache.lucene.util.CollectionUtil; +import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; import org.elasticsearch.cluster.DiffableUtils; @@ -1485,6 +1486,11 @@ public Builder removeCustomIf(BiPredicate customs) { customs.forEach((key, value) -> Objects.requireNonNull(value, key)); this.customs.putAllFromMap(customs); @@ -2201,7 +2207,17 @@ public void writeTo(StreamOutput out) throws IOException { indexMetadata.writeTo(out, true); } out.writeCollection(templates.values()); - VersionedNamedWriteable.writeVersionedWriteables(out, customs.values()); + Collection filteredCustoms = customs.values(); + if (out.getTransportVersion().before(TransportVersions.REPOSITORIES_METADATA_AS_PROJECT_CUSTOM)) { + // RepositoriesMetadata is sent as part of Metadata#customs for version before RepositoriesMetadata migration + // So we exclude it from the project level customs + if (custom(RepositoriesMetadata.TYPE) != null) 
{ + assert ProjectId.DEFAULT.equals(id) + : "Only default project can have repositories metadata. Otherwise the code should have thrown before it reaches here"; + filteredCustoms = filteredCustoms.stream().filter(custom -> custom instanceof RepositoriesMetadata == false).toList(); + } + } + VersionedNamedWriteable.writeVersionedWriteables(out, filteredCustoms); out.writeCollection(reservedStateMetadata.values()); } @@ -2312,6 +2328,12 @@ public ProjectMetadata apply(ProjectMetadata part) { } return builder.build(true); } + + ProjectMetadataDiff withCustoms( + DiffableUtils.MapDiff> customs + ) { + return new ProjectMetadataDiff(indices, templates, customs, reservedStateMetadata); + } } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java index 21615886ecd09..512d7e6e551b8 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java @@ -37,7 +37,7 @@ /** * Contains metadata about registered snapshot repositories */ -public class RepositoriesMetadata extends AbstractNamedDiffable implements Metadata.ClusterCustom { +public class RepositoriesMetadata extends AbstractNamedDiffable implements Metadata.ProjectCustom { public static final String TYPE = "repositories"; @@ -51,8 +51,13 @@ public class RepositoriesMetadata extends AbstractNamedDiffable repositories; + @Deprecated(forRemoval = true) public static RepositoriesMetadata get(ClusterState state) { - return state.metadata().custom(TYPE, EMPTY); + return get(state.metadata().getDefaultProject()); + } + + public static RepositoriesMetadata get(ProjectMetadata project) { + return project.custom(TYPE, EMPTY); } /** @@ -182,8 +187,8 @@ public RepositoriesMetadata(StreamInput in) throws IOException { this.repositories = 
in.readCollectionAsImmutableList(RepositoryMetadata::new); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { - return readDiffFrom(Metadata.ClusterCustom.class, TYPE, in); + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(Metadata.ProjectCustom.class, TYPE, in); } /** diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index ac13f132d2879..f62c50390475b 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -343,7 +343,7 @@ public ClusterState execute(ClusterState currentState) { repositoriesMetadata.add(new RepositoryMetadata(request.name(), request.type(), request.settings())); } repositories = new RepositoriesMetadata(repositoriesMetadata); - mdBuilder.putCustom(RepositoriesMetadata.TYPE, repositories); + mdBuilder.putDefaultProjectCustom(RepositoriesMetadata.TYPE, repositories); changed = true; return ClusterState.builder(currentState).metadata(mdBuilder).build(); } @@ -448,7 +448,7 @@ public ClusterState execute(ClusterState currentState) { } else { final RepositoriesMetadata newReposMetadata = currentReposMetadata.withUuid(repositoryName, repositoryUuid); final Metadata.Builder metadata = Metadata.builder(currentState.metadata()) - .putCustom(RepositoriesMetadata.TYPE, newReposMetadata); + .putDefaultProjectCustom(RepositoriesMetadata.TYPE, newReposMetadata); return ClusterState.builder(currentState).metadata(metadata).build(); } } @@ -531,7 +531,7 @@ public ClusterState execute(ClusterState currentState) { } if (changed) { repositories = new RepositoriesMetadata(repositoriesMetadata); - mdBuilder.putCustom(RepositoriesMetadata.TYPE, repositories); + mdBuilder.putDefaultProjectCustom(RepositoriesMetadata.TYPE, repositories); return 
ClusterState.builder(currentState).metadata(mdBuilder).build(); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index d3fae8adb466d..85039f1b61792 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -2399,7 +2399,7 @@ private ClusterState getClusterStateWithUpdatedRepositoryGeneration(ClusterState return ClusterState.builder(currentState) .metadata( Metadata.builder(currentState.getMetadata()) - .putCustom( + .putDefaultProjectCustom( RepositoriesMetadata.TYPE, RepositoriesMetadata.get(currentState) .withUpdatedGeneration(repoMetadata.name(), repoData.getGenId(), repoData.getGenId()) @@ -2914,7 +2914,9 @@ public ClusterState execute(ClusterState currentState) { : withGenerations.withUuid(metadata.name(), newRepositoryData.getUuid()); final ClusterState newClusterState = stateFilter.apply( ClusterState.builder(currentState) - .metadata(Metadata.builder(currentState.getMetadata()).putCustom(RepositoriesMetadata.TYPE, withUuid)) + .metadata( + Metadata.builder(currentState.getMetadata()).putDefaultProjectCustom(RepositoriesMetadata.TYPE, withUuid) + ) .build() ); return updateRepositoryGenerationsIfNecessary(newClusterState, expectedGen, newGen); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataRepositoriesMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataRepositoriesMetadataTests.java new file mode 100644 index 0000000000000..bfffd99efb338 --- /dev/null +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataRepositoriesMetadataTests.java @@ -0,0 +1,397 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.TransportVersion; +import org.elasticsearch.TransportVersions; +import org.elasticsearch.cluster.AbstractNamedDiffable; +import org.elasticsearch.cluster.ClusterModule; +import org.elasticsearch.cluster.Diff; +import org.elasticsearch.cluster.NamedDiff; +import org.elasticsearch.common.bytes.BytesReference; +import org.elasticsearch.common.io.stream.BytesStreamOutput; +import org.elasticsearch.common.io.stream.NamedWriteableAwareStreamInput; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.Nullable; +import org.elasticsearch.core.Tuple; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.TransportVersionUtils; +import org.elasticsearch.xcontent.ToXContent; +import org.junit.Before; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.Iterator; +import java.util.List; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class MetadataRepositoriesMetadataTests extends ESTestCase { + + private NamedWriteableRegistry namedWriteableRegistry; + private NamedWriteableRegistry namedWriteableRegistryBwc; + + @Before + public void initializeRegistries() { + namedWriteableRegistry = new 
NamedWriteableRegistry(ClusterModule.getNamedWriteables()); + namedWriteableRegistryBwc = new NamedWriteableRegistry( + Stream.concat( + ClusterModule.getNamedWriteables().stream().filter(entry -> entry.name.equals(RepositoriesMetadata.TYPE) == false), + Stream.of( + new NamedWriteableRegistry.Entry( + Metadata.ClusterCustom.class, + RepositoriesMetadata.TYPE, + TestBwcRepositoryMetadata::new + ), + new NamedWriteableRegistry.Entry(NamedDiff.class, RepositoriesMetadata.TYPE, TestBwcRepositoryMetadata::readDiffFrom) + ) + ).toList() + ); + } + + public void testRepositoriesMetadataSerialization() throws IOException { + final Metadata orig = randomMetadata(between(0, 5)); + + final BytesStreamOutput out = new BytesStreamOutput(); + orig.writeTo(out); + + final Metadata fromStream = Metadata.readFrom( + new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry) + ); + + assertTrue(Metadata.isGlobalStateEquals(orig, fromStream)); + } + + public void testRepositoriesMetadataDiffSerialization() throws IOException { + final Tuple tuple = randomMetadataAndUpdate(between(0, 5)); + final Metadata before = tuple.v1(); + final Metadata after = tuple.v2(); + + final Diff diff = after.diff(before); + + final BytesStreamOutput out = new BytesStreamOutput(); + diff.writeTo(out); + + final Diff diffFromStream = Metadata.readDiffFrom( + new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry) + ); + final Metadata metadataFromDiff = diffFromStream.apply(before); + + assertTrue(Metadata.isGlobalStateEquals(after, metadataFromDiff)); + } + + public void testRepositoriesMetadataSerializationBwc() throws IOException { + { + final var oldVersion = TransportVersionUtils.randomVersionBetween( + random(), + TransportVersions.MULTI_PROJECT, + TransportVersionUtils.getPreviousVersion(TransportVersions.REPOSITORIES_METADATA_AS_PROJECT_CUSTOM) + ); + final Metadata orig = randomMetadata(between(0, 5), -1); + 
doTestRepositoriesMetadataSerializationBwc(orig, oldVersion); + } + + { + final var oldVersion = TransportVersionUtils.getPreviousVersion(TransportVersions.MULTI_PROJECT); + // Before multi-project, BWC is possible for a single project + final Metadata orig = randomMetadata(0, -1); + doTestRepositoriesMetadataSerializationBwc(orig, oldVersion); + } + } + + private void doTestRepositoriesMetadataSerializationBwc(Metadata orig, TransportVersion oldVersion) throws IOException { + final BytesStreamOutput out = new BytesStreamOutput(); + out.setTransportVersion(oldVersion); + orig.writeTo(out); + + // Round-trip from new-node writes to old-stream and new-node reads from old-stream + final var in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry); + in.setTransportVersion(oldVersion); + final Metadata fromStream = Metadata.readFrom(in); + assertTrue(Metadata.isGlobalStateEquals(orig, fromStream)); + + // Simulate new-node writes to old-stream and old-node reads from old-stream + simulateReadOnOldNodeAndAssert(out.bytes(), oldVersion, orig); + } + + private void simulateReadOnOldNodeAndAssert(BytesReference bytesReference, TransportVersion oldVersion, Metadata orig) + throws IOException { + final var in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistryBwc); + in.setTransportVersion(oldVersion); + final Metadata fromStream = Metadata.readFrom(in); + assertMetadataBwcEquals(fromStream, orig); + } + + public void testRepositoriesMetadataDiffSerializationBwc() throws IOException { + { + final var oldVersion = TransportVersionUtils.randomVersionBetween( + random(), + TransportVersions.MULTI_PROJECT, + TransportVersionUtils.getPreviousVersion(TransportVersions.REPOSITORIES_METADATA_AS_PROJECT_CUSTOM) + ); + final Tuple tuple = randomMetadataAndUpdate(between(0, 5), -1); + doTestRepositoriesMetadataDiffSerializationBwc(tuple, oldVersion); + } + + { + final var oldVersion = 
TransportVersionUtils.getPreviousVersion(TransportVersions.MULTI_PROJECT); + // Before multi-project, BWC is possible for a single project + final Tuple tuple = randomMetadataAndUpdate(0, -1); + doTestRepositoriesMetadataDiffSerializationBwc(tuple, oldVersion); + } + } + + private void doTestRepositoriesMetadataDiffSerializationBwc(Tuple tuple, TransportVersion oldVersion) + throws IOException { + final Metadata before = tuple.v1(); + final Metadata after = tuple.v2(); + final Diff diff = after.diff(before); + + final BytesStreamOutput out = new BytesStreamOutput(); + + out.setTransportVersion(oldVersion); + diff.writeTo(out); + + // Round-trip from new-node writes diff to old-stream and new-node reads from old-stream + final var in = new NamedWriteableAwareStreamInput(out.bytes().streamInput(), namedWriteableRegistry); + in.setTransportVersion(oldVersion); + final Diff diffFromStream = Metadata.readDiffFrom(in); + final Metadata metadataFromDiff = diffFromStream.apply(before); + assertTrue(Metadata.isGlobalStateEquals(after, metadataFromDiff)); + + // Simulate new-node writes diff to old-stream and old-node reads from old-stream + simulateReadAndApplyDiffOnOldNodeAndAssert(out.bytes(), oldVersion, before, after); + } + + // Simulate the deserialization and application of the diff on an old node + private void simulateReadAndApplyDiffOnOldNodeAndAssert( + BytesReference bytesReference, + TransportVersion oldVersion, + Metadata before, + Metadata after + ) throws IOException { + final var in = new NamedWriteableAwareStreamInput(bytesReference.streamInput(), namedWriteableRegistryBwc); + in.setTransportVersion(oldVersion); + final Diff diffFromStream = Metadata.readDiffFrom(in); + + // On the old node, the "before" metadata would have repositories in the Metadata#customs. 
We simulate + // it by moving the repositories from the default project into Metadata#customs as a TestBwcRepositoryMetadata + final Metadata beforeBwc = Metadata.builder(before) + .putCustom( + RepositoriesMetadata.TYPE, + new TestBwcRepositoryMetadata(RepositoriesMetadata.get(before.getProject(ProjectId.DEFAULT)).repositories()) + ) + .put(ProjectMetadata.builder(before.getProject(ProjectId.DEFAULT)).removeCustom(RepositoriesMetadata.TYPE)) + .build(); + // Apply the diff to the BWC "before" metadata and get the "after" metadata on an old node + final Metadata metadataFromDiff = diffFromStream.apply(beforeBwc); + + assertMetadataBwcEquals(metadataFromDiff, after); + } + + /** + * Check equality of the two metadata by handling the different types of RepositoriesMetadata + * @param metadataBwc The old style metadata with RepositoriesMetadata as Metadata#ClusterCustom + * @param metadataNew The new style metadata with RepositoriesMetadata as Metadata#ProjectCustom + */ + private void assertMetadataBwcEquals(Metadata metadataBwc, Metadata metadataNew) { + // BWC metadata has no repositories in project + assertThat(metadataBwc.getProject(ProjectId.DEFAULT).custom(RepositoriesMetadata.TYPE), nullValue()); + // New metadata has no repositories in Metadata#customs + assertThat(metadataNew.custom(RepositoriesMetadata.TYPE), nullValue()); + + final Metadata.ClusterCustom custom = metadataBwc.custom(RepositoriesMetadata.TYPE); + + if (custom != null) { + assertThat(custom, notNullValue()); + assertThat(custom, instanceOf(TestBwcRepositoryMetadata.class)); + assertThat( + ((TestBwcRepositoryMetadata) custom).repositories, + equalTo(RepositoriesMetadata.get(metadataNew.getProject(ProjectId.DEFAULT)).repositories()) + ); + } else { + // If there are no repositories in the deserialized metadataBwc, there should be no repositories in the original metadataNew + assertThat(metadataNew.getProject(ProjectId.DEFAULT).custom(RepositoriesMetadata.TYPE), nullValue()); + } + + // All other
parts excluding repositories of the two metadata are equal + assertTrue( + Metadata.isGlobalStateEquals( + Metadata.builder(metadataNew) + .put(ProjectMetadata.builder(metadataNew.getProject(ProjectId.DEFAULT)).removeCustom(RepositoriesMetadata.TYPE)) + .build(), + Metadata.builder(metadataBwc).removeCustom(RepositoriesMetadata.TYPE).build() + ) + ); + } + + private static Metadata randomMetadata(int extraProjects) { + return randomMetadata(extraProjects, between(0, 5)); + } + + /** + * Randomly create a Metadata object with the default project and extra projects as specified. The default project + * has random RepositoriesMetadata which can also be null. Each extra project also has a random RepositoriesMetadata + * containing the specified number of RepositoryMetadata. If the specified number is -1, extra projects will not have + * any RepositoriesMetadata. + * @param extraProjects Number of extra projects to create + * @param numReposPerExtraProject Number of RepositoryMetadata for each extra project. Or -1 to ensure no + * RepositoriesMetadata for extra projects. + */ + private static Metadata randomMetadata(int extraProjects, int numReposPerExtraProject) { + final Metadata.Builder builder = Metadata.builder().put(randomProject(ProjectId.DEFAULT, between(0, 5))); + IntStream.range(0, extraProjects).forEach(i -> builder.put(randomProject(randomUniqueProjectId(), numReposPerExtraProject))); + return builder.build(); + } + + private static Tuple randomMetadataAndUpdate(int extraProjects) { + return randomMetadataAndUpdate(extraProjects, between(0, 5)); + } + + /** + * Randomly generate a metadata then randomly mutates its RepositoriesMetadata in all projects. + * @param extraProjects Number of extra projects + * @param numReposPerExtraProject Number of RepositoryMetadata the RepositoriesMetadata contains per project or -1 for null. 
+ */ + private static Tuple randomMetadataAndUpdate(int extraProjects, int numReposPerExtraProject) { + final Metadata before = randomMetadata(extraProjects, numReposPerExtraProject); + + final Metadata.Builder builder = Metadata.builder(before); + + builder.forEachProject(b -> { + final RepositoriesMetadata repositoriesMetadata = b.getCustom(RepositoriesMetadata.TYPE); + if (ProjectId.DEFAULT.equals(b.getId()) || numReposPerExtraProject > 0) { + final RepositoriesMetadata mutatedRepositoriesMetadata = mutateRepositoriesMetadata(repositoriesMetadata); + if (mutatedRepositoriesMetadata != null) { + b.putCustom(RepositoriesMetadata.TYPE, mutatedRepositoriesMetadata); + } else { + b.removeCustom(RepositoriesMetadata.TYPE); + } + } + return b; + }); + + return new Tuple<>(before, builder.build()); + } + + /** + * Randomly create a ProjectMetadata object with the given ProjectId and RepositoriesMetadata containing specified + * number of RepositoryMetadata. If the number is -1, no RepositoriesMetadata will be created. + */ + private static ProjectMetadata randomProject(ProjectId projectId, int numRepos) { + final ProjectMetadata.Builder builder = ProjectMetadata.builder(projectId); + if (numRepos < 0) { + return builder.build(); + } + final RepositoriesMetadata repositoriesMetadata = randomRepositoriesMetadata(); + if (repositoriesMetadata == null) { + return builder.build(); + } else { + return builder.putCustom(RepositoriesMetadata.TYPE, repositoriesMetadata).build(); + } + } + + @Nullable + private static RepositoriesMetadata randomRepositoriesMetadata() { + return randomRepositoriesMetadata(between(0, 5)); + } + + private static RepositoriesMetadata randomRepositoriesMetadata(int numRepos) { + if (numRepos == 0) { + return randomBoolean() ? 
null : RepositoriesMetadata.EMPTY; + } + return new RepositoriesMetadata(randomList(numRepos, numRepos, MetadataRepositoriesMetadataTests::randomRepositoryMetadata)); + } + + private static RepositoryMetadata randomRepositoryMetadata() { + return new RepositoryMetadata(randomIdentifier(), randomUUID(), randomAlphaOfLengthBetween(3, 8), Settings.EMPTY); + } + + private static RepositoriesMetadata mutateRepositoriesMetadata(@Nullable RepositoriesMetadata repositoriesMetadata) { + if ((repositoriesMetadata == null || repositoriesMetadata.repositories().isEmpty()) && randomBoolean()) { + return randomRepositoriesMetadata(); + } + + if (randomBoolean()) { + return randomRepositoriesMetadata(); + } + + if (repositoriesMetadata == null || repositoriesMetadata.repositories().isEmpty()) { + return randomRepositoriesMetadata(between(1, 5)); + } + + return new RepositoriesMetadata(repositoriesMetadata.repositories().stream().map(repositoryMetadata -> switch (randomInt(3)) { + case 0 -> new RepositoryMetadata(randomIdentifier(), repositoryMetadata.uuid(), repositoryMetadata.type(), Settings.EMPTY); + case 1 -> new RepositoryMetadata(repositoryMetadata.name(), randomUUID(), repositoryMetadata.type(), Settings.EMPTY); + case 2 -> new RepositoryMetadata( + repositoryMetadata.name(), + repositoryMetadata.uuid(), + randomAlphaOfLengthBetween(3, 8), + Settings.EMPTY + ); + default -> new RepositoryMetadata( + repositoryMetadata.name(), + repositoryMetadata.uuid(), + repositoryMetadata.type(), + Settings.builder().put("base_path", randomIdentifier()).build() + ); + }).toList()); + } + + public static class TestBwcRepositoryMetadata extends AbstractNamedDiffable implements Metadata.ClusterCustom { + + private final List repositories; + + public TestBwcRepositoryMetadata(List repositories) { + this.repositories = repositories; + } + + public TestBwcRepositoryMetadata(StreamInput in) throws IOException { + this.repositories = in.readCollectionAsImmutableList(RepositoryMetadata::new); + 
} + + @Override + public EnumSet context() { + return Metadata.API_AND_GATEWAY; + } + + @Override + public TransportVersion getMinimalSupportedVersion() { + return TransportVersions.MINIMUM_COMPATIBLE; + } + + @Override + public String getWriteableName() { + return RepositoriesMetadata.TYPE; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeCollection(repositories); + } + + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(Metadata.ClusterCustom.class, RepositoriesMetadata.TYPE, in); + } + + @Override + public Iterator toXContentChunked(ToXContent.Params params) { + return null; + } + } +} diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index c885a9d0b6f93..6402111e331fc 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -812,6 +812,17 @@ public void testParseXContentFormatBeforeMultiProject() throws IOException { } } }, + "repositories": { + "my-repo": { + "type": "fs", + "uuid": "_my-repo-uuid_", + "settings": { + "location": "backup" + }, + "generation": 42, + "pending_generation": 42 + } + }, "reserved_state":{ } } } @@ -830,7 +841,7 @@ public void testParseXContentFormatBeforeMultiProject() throws IOException { ); assertThat( metadata.getProject().customs().keySet(), - containsInAnyOrder("persistent_tasks", "index-graveyard", "component_template") + containsInAnyOrder("persistent_tasks", "index-graveyard", "component_template", "repositories") ); final var projectTasks = PersistentTasksCustomMetadata.get(metadata.getProject()); assertThat( @@ -838,6 +849,105 @@ public void testParseXContentFormatBeforeMultiProject() throws IOException { containsInAnyOrder("upgrade-system-indices") ); assertThat(clusterTasks.getLastAllocationId(), 
equalTo(projectTasks.getLastAllocationId())); + assertThat(metadata.customs(), not(hasKey("repositories"))); + final var repositoriesMetadata = RepositoriesMetadata.get(metadata.getProject(ProjectId.DEFAULT)); + assertThat( + repositoriesMetadata.repositories(), + equalTo( + List.of( + new RepositoryMetadata("my-repo", "_my-repo-uuid_", "fs", Settings.builder().put("location", "backup").build(), 42, 42) + ) + ) + ); + } + + public void testParseXContentFormatBeforeRepositoriesMetadataMigration() throws IOException { + final String json = org.elasticsearch.core.Strings.format(""" + { + "meta-data": { + "version": 54321, + "cluster_uuid":"aba1aa1ababbbaabaabaab", + "cluster_uuid_committed":false, + "cluster_coordination":{ + "term":1, + "last_committed_config":[], + "last_accepted_config":[], + "voting_config_exclusions":[] + }, + "projects" : [ + { + "id" : "default", + "templates" : { + "template" : { + "order" : 0, + "index_patterns" : [ + "pattern1", + "pattern2" + ], + "mappings" : { + "key1" : { } + }, + "aliases" : { } + } + }, + "index-graveyard" : { + "tombstones" : [ ] + }, + "reserved_state" : { } + }, + { + "id" : "another_project", + "templates" : { + "template" : { + "order" : 0, + "index_patterns" : [ + "pattern1", + "pattern2" + ], + "mappings" : { + "key1" : { } + }, + "aliases" : { } + } + }, + "index-graveyard" : { + "tombstones" : [ ] + }, + "reserved_state" : { } + } + ], + "repositories": { + "my-repo": { + "type": "fs", + "uuid": "_my-repo-uuid_", + "settings": { + "location": "backup" + }, + "generation": 42, + "pending_generation": 42 + } + }, + "reserved_state":{ } + } + } + """, IndexVersion.current(), IndexVersion.current()); + + final Metadata metadata = fromJsonXContentStringWithPersistentTasks(json); + assertThat(metadata, notNullValue()); + assertThat(metadata.clusterUUID(), is("aba1aa1ababbbaabaabaab")); + + assertThat(metadata.projects().keySet(), containsInAnyOrder(ProjectId.fromId("default"), ProjectId.fromId("another_project"))); 
+ assertThat(metadata.customs(), not(hasKey("repositories"))); + final var repositoriesMetadata = RepositoriesMetadata.get(metadata.getProject(ProjectId.DEFAULT)); + assertThat( + repositoriesMetadata.repositories(), + equalTo( + List.of( + new RepositoryMetadata("my-repo", "_my-repo-uuid_", "fs", Settings.builder().put("location", "backup").build(), 42, 42) + ) + ) + ); + assertThat(metadata.getProject(ProjectId.fromId("another_project")).customs(), not(hasKey("repositories"))); } private Metadata fromJsonXContentStringWithPersistentTasks(String json) throws IOException { @@ -2615,6 +2725,19 @@ public void testMultiProjectXContent() throws IOException { ) ) ) + .putCustom( + RepositoriesMetadata.TYPE, + new RepositoriesMetadata( + List.of( + new RepositoryMetadata( + "backup", + "uuid-" + project.id().id(), + "fs", + Settings.builder().put("location", project.id().id()).build() + ) + ) + ) + ) .build() ) .toList(); @@ -2677,6 +2800,19 @@ public void testMultiProjectXContent() throws IOException { final var projectTasks = PersistentTasksCustomMetadata.get(project); assertThat(projectTasks.getLastAllocationId(), equalTo(lastAllocationId)); assertThat(projectTasks.taskMap().keySet(), equalTo(Set.of(SystemIndexMigrationTaskParams.SYSTEM_INDEX_UPGRADE_TASK_NAME))); + assertThat( + RepositoriesMetadata.get(project).repositories(), + equalTo( + List.of( + new RepositoryMetadata( + "backup", + "uuid-" + project.id().id(), + "fs", + Settings.builder().put("location", project.id().id()).build() + ) + ) + ) + ); } final var clusterTasks = ClusterPersistentTasksCustomMetadata.get(fromXContentMeta); assertThat(clusterTasks.getLastAllocationId(), equalTo(lastAllocationId + 1)); @@ -2937,8 +3073,6 @@ public static int expectedChunkCount(ToXContent.Params params, Metadata metadata chunkCount += checkChunkSize(custom, params, 1); } else if (custom instanceof NodesShutdownMetadata nodesShutdownMetadata) { chunkCount += checkChunkSize(custom, params, 2 + 
nodesShutdownMetadata.getAll().size()); - } else if (custom instanceof RepositoriesMetadata repositoriesMetadata) { - chunkCount += checkChunkSize(custom, params, repositoriesMetadata.repositories().size()); } else { // could be anything, we have to just try it chunkCount += count(custom.toXContentChunked(params)); diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ProjectMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ProjectMetadataTests.java index ee84c63a0cc37..d692756d5f4c0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ProjectMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ProjectMetadataTests.java @@ -371,6 +371,8 @@ static int expectedChunkCount(ToXContent.Params params, ProjectMetadata project) chunkCount += checkChunkSize(custom, params, 2 + ingestMetadata.getPipelines().size()); } else if (custom instanceof PersistentTasksCustomMetadata persistentTasksCustomMetadata) { chunkCount += checkChunkSize(custom, params, 3 + persistentTasksCustomMetadata.tasks().size()); + } else if (custom instanceof RepositoriesMetadata repositoriesMetadata) { + chunkCount += checkChunkSize(custom, params, repositoriesMetadata.repositories().size()); } else { // could be anything, we have to just try it chunkCount += count(custom.toXContentChunked(params)); diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetadataSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetadataSerializationTests.java index d8c871d8c17ad..118849800c977 100644 --- a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetadataSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetadataSerializationTests.java @@ -25,10 +25,10 @@ import java.util.Comparator; import java.util.List; -public class RepositoriesMetadataSerializationTests extends ChunkedToXContentDiffableSerializationTestCase { +public 
class RepositoriesMetadataSerializationTests extends ChunkedToXContentDiffableSerializationTestCase { @Override - protected Metadata.ClusterCustom createTestInstance() { + protected Metadata.ProjectCustom createTestInstance() { int numberOfRepositories = randomInt(10); List entries = new ArrayList<>(); for (int i = 0; i < numberOfRepositories; i++) { @@ -50,12 +50,12 @@ protected Metadata.ClusterCustom createTestInstance() { } @Override - protected Writeable.Reader instanceReader() { + protected Writeable.Reader instanceReader() { return RepositoriesMetadata::new; } @Override - protected Metadata.ClusterCustom mutateInstance(Metadata.ClusterCustom instance) { + protected Metadata.ProjectCustom mutateInstance(Metadata.ProjectCustom instance) { List entries = new ArrayList<>(((RepositoriesMetadata) instance).repositories()); boolean addEntry = entries.isEmpty() ? true : randomBoolean(); if (addEntry) { @@ -80,7 +80,7 @@ public Settings randomSettings() { } @Override - protected Metadata.ClusterCustom makeTestChanges(Metadata.ClusterCustom testInstance) { + protected Metadata.ProjectCustom makeTestChanges(Metadata.ProjectCustom testInstance) { RepositoriesMetadata repositoriesMetadata = (RepositoriesMetadata) testInstance; List repos = new ArrayList<>(repositoriesMetadata.repositories()); if (randomBoolean() && repos.size() > 1) { @@ -99,7 +99,7 @@ protected Metadata.ClusterCustom makeTestChanges(Metadata.ClusterCustom testInst } @Override - protected Writeable.Reader> diffReader() { + protected Writeable.Reader> diffReader() { return RepositoriesMetadata::readDiffFrom; } @@ -109,7 +109,7 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { } @Override - protected Metadata.ClusterCustom doParseInstance(XContentParser parser) throws IOException { + protected Metadata.ProjectCustom doParseInstance(XContentParser parser) throws IOException { assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken()); RepositoriesMetadata repositoriesMetadata = 
RepositoriesMetadata.fromXContent(parser); assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken()); diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java index c5f943c529c5d..dc500220850e3 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java @@ -513,7 +513,7 @@ protected void assertDocCount(String index, long count) { */ protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map metadata) throws Exception { final ClusterState state = clusterAdmin().prepareState(TEST_REQUEST_TIMEOUT).get().getState(); - final RepositoriesMetadata repositoriesMetadata = state.metadata().custom(RepositoriesMetadata.TYPE); + final RepositoriesMetadata repositoriesMetadata = state.metadata().getProject().custom(RepositoriesMetadata.TYPE); assertNotNull(repositoriesMetadata); final RepositoryMetadata initialRepoMetadata = repositoriesMetadata.repository(repoName); assertNotNull(initialRepoMetadata); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java index e2218dfab1f1c..689568e8b74d3 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java @@ -8,8 +8,7 @@ import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.RepositoriesMetadata; -import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import 
org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.common.xcontent.ChunkedToXContent; @@ -25,6 +24,7 @@ import org.elasticsearch.xpack.core.XPackPlugin; import java.util.Collections; +import java.util.Map; import java.util.UUID; import static org.hamcrest.Matchers.equalTo; @@ -64,14 +64,14 @@ public void testXContentSerializationOneSignedLicenseWithUsedTrial() throws Exce public void testLicenseMetadataParsingDoesNotSwallowOtherMetadata() throws Exception { License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(2)); LicensesMetadata licensesMetadata = new LicensesMetadata(license, TrialLicenseVersion.CURRENT); - RepositoryMetadata repositoryMetadata = new RepositoryMetadata("repo", "fs", Settings.EMPTY); - RepositoriesMetadata repositoriesMetadata = new RepositoriesMetadata(Collections.singletonList(repositoryMetadata)); + NodesShutdownMetadata nodesShutdownMetadata = new NodesShutdownMetadata(Map.of()); + final Metadata.Builder metadataBuilder = Metadata.builder(); if (randomBoolean()) { // random order of insertion metadataBuilder.putCustom(licensesMetadata.getWriteableName(), licensesMetadata); - metadataBuilder.putCustom(repositoriesMetadata.getWriteableName(), repositoriesMetadata); + metadataBuilder.putCustom(nodesShutdownMetadata.getWriteableName(), nodesShutdownMetadata); } else { - metadataBuilder.putCustom(repositoriesMetadata.getWriteableName(), repositoriesMetadata); + metadataBuilder.putCustom(nodesShutdownMetadata.getWriteableName(), nodesShutdownMetadata); metadataBuilder.putCustom(licensesMetadata.getWriteableName(), licensesMetadata); } // serialize metadata @@ -84,7 +84,7 @@ public void testLicenseMetadataParsingDoesNotSwallowOtherMetadata() throws Excep Metadata metadata = Metadata.Builder.fromXContent(createParser(builder)); // check that custom metadata still present assertThat(metadata.custom(licensesMetadata.getWriteableName()), notNullValue()); - 
assertThat(metadata.custom(repositoriesMetadata.getWriteableName()), notNullValue()); + assertThat(metadata.custom(nodesShutdownMetadata.getWriteableName()), notNullValue()); } public void testXContentSerializationOneTrial() throws Exception { diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java index f6a35fb98203d..0b33b67770048 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java @@ -737,7 +737,7 @@ private static final class RepositoryUuidWatcher implements ClusterStateListener @Override public void clusterChanged(ClusterChangedEvent event) { - final RepositoriesMetadata repositoriesMetadata = event.state().metadata().custom(RepositoriesMetadata.TYPE); + final RepositoriesMetadata repositoriesMetadata = event.state().metadata().getProject().custom(RepositoriesMetadata.TYPE); if (repositoriesMetadata == null) { knownUuids.clear(); return; diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetadataSerializationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetadataSerializationTests.java index ff4c345bf3f6d..50d50769e30a1 100644 --- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetadataSerializationTests.java +++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetadataSerializationTests.java @@ -8,8 +8,7 @@ import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.RepositoriesMetadata; -import org.elasticsearch.cluster.metadata.RepositoryMetadata; 
+import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.test.ESTestCase; @@ -22,6 +21,7 @@ import org.elasticsearch.xpack.core.watcher.WatcherMetadata; import java.util.Collections; +import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -48,14 +48,13 @@ public void testWatcherMetadataParsingDoesNotSwallowOtherMetadata() throws Excep new Watcher(settings); // makes sure WatcherMetadata is registered in Custom Metadata boolean manuallyStopped = randomBoolean(); WatcherMetadata watcherMetadata = new WatcherMetadata(manuallyStopped); - RepositoryMetadata repositoryMetadata = new RepositoryMetadata("repo", "fs", Settings.EMPTY); - RepositoriesMetadata repositoriesMetadata = new RepositoriesMetadata(Collections.singletonList(repositoryMetadata)); + NodesShutdownMetadata nodesShutdownMetadata = new NodesShutdownMetadata(Map.of()); final Metadata.Builder metadataBuilder = Metadata.builder(); if (randomBoolean()) { // random order of insertion metadataBuilder.putCustom(watcherMetadata.getWriteableName(), watcherMetadata); - metadataBuilder.putCustom(repositoriesMetadata.getWriteableName(), repositoriesMetadata); + metadataBuilder.putCustom(nodesShutdownMetadata.getWriteableName(), nodesShutdownMetadata); } else { - metadataBuilder.putCustom(repositoriesMetadata.getWriteableName(), repositoriesMetadata); + metadataBuilder.putCustom(nodesShutdownMetadata.getWriteableName(), nodesShutdownMetadata); metadataBuilder.putCustom(watcherMetadata.getWriteableName(), watcherMetadata); } // serialize metadata @@ -70,7 +69,7 @@ public void testWatcherMetadataParsingDoesNotSwallowOtherMetadata() throws Excep Metadata metadata = Metadata.Builder.fromXContent(createParser(builder)); // check that custom metadata still present assertThat(metadata.getProject().custom(watcherMetadata.getWriteableName()), 
notNullValue()); - assertThat(metadata.custom(repositoriesMetadata.getWriteableName()), notNullValue()); + assertThat(metadata.custom(nodesShutdownMetadata.getWriteableName()), notNullValue()); } private static WatcherMetadata getWatcherMetadataFromXContent(XContentParser parser) throws Exception {