diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java index d994bd70eb7a0..0c65cc92c36ef 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderIT.java @@ -32,7 +32,7 @@ import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.geoip.stats.GeoIpStatsAction; import org.elasticsearch.persistent.PersistentTaskParams; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.plugins.IngestPlugin; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; @@ -115,7 +115,7 @@ public void cleanUp() throws Exception { .putNull("ingest.geoip.database_validity") ); assertBusy(() -> { - PersistentTasksCustomMetadata.PersistentTask task = getTask(); + PersistentTasksMetadataSection.PersistentTask task = getTask(); if (task != null) { GeoIpTaskState state = (GeoIpTaskState) task.getState(); assertThat(state.getDatabases(), anEmptyMap()); @@ -294,7 +294,7 @@ public void testGeoIpDatabasesDownloadNoGeoipProcessors() throws Exception { putGeoIpPipeline(pipelineId); updateClusterSettings(Settings.builder().put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true)); assertBusy(() -> { - PersistentTasksCustomMetadata.PersistentTask task = getTask(); + PersistentTasksMetadataSection.PersistentTask task = getTask(); assertNotNull(task); assertNotNull(task.getState()); }); @@ -467,7 +467,7 @@ private void verifyUpdatedDatabase() throws Exception { } private GeoIpTaskState getGeoIpTaskState() { - PersistentTasksCustomMetadata.PersistentTask task = getTask(); + PersistentTasksMetadataSection.PersistentTask task = getTask(); assertNotNull(task); GeoIpTaskState state = (GeoIpTaskState) task.getState(); assertNotNull(state); @@ -756,8 +756,8 @@ private void parseDatabase(Path tempFile) throws IOException { } } - private PersistentTasksCustomMetadata.PersistentTask getTask() { - return PersistentTasksCustomMetadata.getTaskWithId(clusterService().state(), GeoIpDownloader.GEOIP_DOWNLOADER); + private PersistentTasksMetadataSection.PersistentTask getTask() { + return PersistentTasksMetadataSection.getTaskWithId(clusterService().state(), GeoIpDownloader.GEOIP_DOWNLOADER); } private static class MultiByteArrayInputStream extends InputStream { diff --git a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java index 473f1fb498e76..cf13d47748168 100644 --- a/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java +++ b/modules/ingest-geoip/src/internalClusterTest/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksResponse; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.persistent.PersistentTaskParams; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.plugins.Plugin; import 
org.elasticsearch.reindex.ReindexPlugin; import org.junit.After; @@ -48,7 +48,7 @@ public void cleanUp() { public void testTaskRemovedAfterCancellation() throws Exception { updateClusterSettings(Settings.builder().put(GeoIpDownloaderTaskExecutor.ENABLED_SETTING.getKey(), true)); assertBusy(() -> { - PersistentTasksCustomMetadata.PersistentTask task = getTask(); + PersistentTasksMetadataSection.PersistentTask task = getTask(); assertNotNull(task); assertTrue(task.isAssigned()); }); @@ -63,7 +63,7 @@ public void testTaskRemovedAfterCancellation() throws Exception { }); } - private PersistentTasksCustomMetadata.PersistentTask getTask() { - return PersistentTasksCustomMetadata.getTaskWithId(clusterService().state(), GeoIpDownloader.GEOIP_DOWNLOADER); + private PersistentTasksMetadataSection.PersistentTask getTask() { + return PersistentTasksMetadataSection.getTaskWithId(clusterService().state(), GeoIpDownloader.GEOIP_DOWNLOADER); } } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java index dcb882ede230c..f5c416242361e 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/DatabaseNodeService.java @@ -31,7 +31,7 @@ import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.ingest.IngestService; import org.elasticsearch.ingest.geoip.stats.CacheStats; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.SearchHit; import org.elasticsearch.watcher.ResourceWatcherService; @@ -251,7 +251,7 @@ void checkDatabases(ClusterState state) { return; } - PersistentTasksCustomMetadata persistentTasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = state.metadata().section(PersistentTasksMetadataSection.TYPE); if (persistentTasks == null) { logger.trace("Not checking databases because persistent tasks are null"); return; diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java index 9645e34751642..bd5a25ebf6ff8 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloader.java @@ -33,7 +33,7 @@ import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration; import org.elasticsearch.ingest.geoip.direct.DatabaseConfigurationMetadata; import org.elasticsearch.persistent.AllocatedPersistentTask; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; @@ -156,7 +156,7 @@ void updateDatabases() throws IOException { } logger.trace("Updating geoip databases"); - IngestGeoIpMetadata geoIpMeta = clusterState.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); + IngestGeoIpMetadata geoIpMeta = clusterState.metadata().section(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); // if there are entries in 
the cs that aren't in the persistent task state, // then download those (only) diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTaskExecutor.java index 8fc46fe157548..55d0e5ed35a0c 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTaskExecutor.java @@ -27,8 +27,8 @@ import org.elasticsearch.ingest.IngestService; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -126,7 +126,7 @@ protected EnterpriseGeoIpDownloader createTask( String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask taskInProgress, + PersistentTasksMetadataSection.PersistentTask taskInProgress, Map headers ) { return new EnterpriseGeoIpDownloader( diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java index 57e944ef9b994..c29a0ca1ed19b 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpTaskState.java @@ -19,7 +19,7 @@ import org.elasticsearch.ingest.EnterpriseGeoIpTask; import org.elasticsearch.ingest.geoip.GeoIpTaskState.Metadata; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentBuilder; @@ -33,7 +33,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.GEOIP_DOWNLOADER; -import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.getTaskWithId; +import static org.elasticsearch.persistent.PersistentTasksMetadataSection.getTaskWithId; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; class EnterpriseGeoIpTaskState implements PersistentTaskState, VersionedNamedWriteable { @@ -146,7 +146,7 @@ public void writeTo(StreamOutput out) throws IOException { */ @Nullable static EnterpriseGeoIpTaskState getEnterpriseGeoIpTaskState(ClusterState state) { - PersistentTasksCustomMetadata.PersistentTask task = getTaskWithId(state, EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER); + PersistentTasksMetadataSection.PersistentTask task = getTaskWithId(state, EnterpriseGeoIpTask.ENTERPRISE_GEOIP_DOWNLOADER); return (task == null) ? 
null : (EnterpriseGeoIpTaskState) task.getState(); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java index ee6f2f16f051b..649ad11132295 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloader.java @@ -33,7 +33,7 @@ import org.elasticsearch.ingest.geoip.GeoIpTaskState.Metadata; import org.elasticsearch.ingest.geoip.stats.GeoIpDownloaderStats; import org.elasticsearch.persistent.AllocatedPersistentTask; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java index 3f89bb1dd5c50..9b99e32edfd78 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutor.java @@ -35,8 +35,8 @@ import org.elasticsearch.ingest.PipelineConfiguration; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -169,7 +169,7 @@ protected GeoIpDownloader createTask( String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask taskInProgress, + PersistentTasksMetadataSection.PersistentTask taskInProgress, Map headers ) { return new GeoIpDownloader( @@ -362,7 +362,7 @@ private void startTask(Runnable onFailure) { } private void stopTask(Runnable onFailure) { - ActionListener> listener = ActionListener.wrap( + ActionListener> listener = ActionListener.wrap( r -> logger.debug("Stopped geoip downloader task"), e -> { Throwable t = e instanceof RemoteTransportException ? 
ExceptionsHelper.unwrapCause(e) : e; diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java index 56f96786d9b7f..5d98dc5ff70df 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/GeoIpTaskState.java @@ -19,7 +19,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.ToXContentObject; @@ -36,7 +36,7 @@ import java.util.stream.Collectors; import static org.elasticsearch.ingest.geoip.GeoIpDownloader.GEOIP_DOWNLOADER; -import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.getTaskWithId; +import static org.elasticsearch.persistent.PersistentTasksMetadataSection.getTaskWithId; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; import static org.elasticsearch.xcontent.ConstructingObjectParser.optionalConstructorArg; @@ -241,7 +241,7 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws */ @Nullable static GeoIpTaskState getGeoIpTaskState(ClusterState state) { - PersistentTasksCustomMetadata.PersistentTask task = getTaskWithId(state, GeoIpDownloader.GEOIP_DOWNLOADER); + PersistentTasksMetadataSection.PersistentTask task = getTaskWithId(state, GeoIpDownloader.GEOIP_DOWNLOADER); return (task == null) ? null : (GeoIpTaskState) task.getState(); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java index f5ac755b6b980..c762b91086fba 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpMetadata.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -36,7 +37,7 @@ /** * Holds the ingest-geoip databases that are available in the cluster state. 
*/ -public final class IngestGeoIpMetadata implements Metadata.Custom { +public final class IngestGeoIpMetadata implements MetadataSection { public static final String TYPE = "ingest_geoip"; private static final ParseField DATABASES_FIELD = new ParseField("databases"); @@ -100,11 +101,11 @@ public EnumSet context() { } @Override - public Diff diff(Metadata.Custom before) { + public Diff diff(MetadataSection before) { return new GeoIpMetadataDiff((IngestGeoIpMetadata) before, this); } - static class GeoIpMetadataDiff implements NamedDiff { + static class GeoIpMetadataDiff implements NamedDiff { final Diff> databases; @@ -122,7 +123,7 @@ static class GeoIpMetadataDiff implements NamedDiff { } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { return new IngestGeoIpMetadata(databases.apply(((IngestGeoIpMetadata) part).databases)); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java index e606688ad60a0..128f7f76b6f50 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/IngestGeoIpPlugin.java @@ -15,7 +15,7 @@ import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; @@ -225,7 +225,7 @@ public List getNamedXContent() { @Override public List getNamedWriteables() { return List.of( - new NamedWriteableRegistry.Entry(Metadata.Custom.class, IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata::new), + new NamedWriteableRegistry.Entry(MetadataSection.class, IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.GeoIpMetadataDiff::new), new NamedWriteableRegistry.Entry(PersistentTaskState.class, GEOIP_DOWNLOADER, GeoIpTaskState::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, GEOIP_DOWNLOADER, GeoIpTaskParams::new), diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java index 4f9b9062332e4..0c369cbec1e02 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportDeleteDatabaseConfigurationAction.java @@ -87,7 +87,7 @@ public TransportDeleteDatabaseConfigurationAction( protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { final String id = request.getDatabaseId(); - final IngestGeoIpMetadata geoIpMeta = state.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); + final IngestGeoIpMetadata geoIpMeta = state.metadata().section(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); if (geoIpMeta.getDatabases().containsKey(id) == false) 
{ throw new ResourceNotFoundException("Database configuration not found: {}", id); } @@ -103,7 +103,7 @@ private record DeleteDatabaseConfigurationTask(ActionListener databases = new HashMap<>(geoIpMeta.getDatabases()); @@ -111,7 +111,7 @@ ClusterState execute(ClusterState currentState) throws Exception { Metadata currentMeta = currentState.metadata(); return ClusterState.builder(currentState) - .metadata(Metadata.builder(currentMeta).putCustom(IngestGeoIpMetadata.TYPE, new IngestGeoIpMetadata(databases))) + .metadata(Metadata.builder(currentMeta).putSection(IngestGeoIpMetadata.TYPE, new IngestGeoIpMetadata(databases))) .build(); } diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java index ae090dc4c64f6..75e6879673a0e 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportGetDatabaseConfigurationAction.java @@ -78,7 +78,7 @@ protected void masterOperation( ); } - final IngestGeoIpMetadata geoIpMeta = state.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); + final IngestGeoIpMetadata geoIpMeta = state.metadata().section(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); List results = new ArrayList<>(); for (String id : ids) { diff --git a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java index 490a9edbec89a..4a8ea134c45d1 100644 --- a/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java +++ b/modules/ingest-geoip/src/main/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationAction.java @@ -108,7 +108,7 @@ static boolean isNoopUpdate(@Nullable DatabaseConfigurationMetadata existingData static void validatePrerequisites(DatabaseConfiguration database, ClusterState state) { // we need to verify that the database represents a unique file (name) among the various databases for this same provider - IngestGeoIpMetadata geoIpMeta = state.metadata().custom(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); + IngestGeoIpMetadata geoIpMeta = state.metadata().section(IngestGeoIpMetadata.TYPE, IngestGeoIpMetadata.EMPTY); Optional sameName = geoIpMeta.getDatabases() .values() @@ -131,7 +131,7 @@ private record UpdateDatabaseConfigurationTask(ActionListener task = new PersistentTask<>(taskId, GeoIpDownloader.GEOIP_DOWNLOADER, new GeoIpTaskParams(), 1, null); task = new PersistentTask<>(task, new GeoIpTaskState(Map.of("GeoIP2-City.mmdb", new GeoIpTaskState.Metadata(10, 5, 14, md5, 10)))); - PersistentTasksCustomMetadata tasksCustomMetadata = new PersistentTasksCustomMetadata(1L, Map.of(taskId, task)); + PersistentTasksMetadataSection tasksCustomMetadata = new PersistentTasksMetadataSection(1L, Map.of(taskId, task)); ClusterState state = createClusterState(tasksCustomMetadata); @@ -169,7 +169,7 @@ public void testCheckDatabases() throws Exception { task, new GeoIpTaskState(Map.of("GeoIP2-City.mmdb", new GeoIpTaskState.Metadata(10, 5, 14, md5, System.currentTimeMillis()))) ); - tasksCustomMetadata = new PersistentTasksCustomMetadata(1L, Map.of(taskId, 
task)); + tasksCustomMetadata = new PersistentTasksMetadataSection(1L, Map.of(taskId, task)); state = createClusterState(tasksCustomMetadata); @@ -192,7 +192,7 @@ public void testCheckDatabases_dontCheckDatabaseOnNonIngestNode() throws Excepti String taskId = GeoIpDownloader.GEOIP_DOWNLOADER; PersistentTask task = new PersistentTask<>(taskId, GeoIpDownloader.GEOIP_DOWNLOADER, new GeoIpTaskParams(), 1, null); task = new PersistentTask<>(task, new GeoIpTaskState(Map.of("GeoIP2-City.mmdb", new GeoIpTaskState.Metadata(0L, 0, 9, md5, 10)))); - PersistentTasksCustomMetadata tasksCustomMetadata = new PersistentTasksCustomMetadata(1L, Map.of(taskId, task)); + PersistentTasksMetadataSection tasksCustomMetadata = new PersistentTasksMetadataSection(1L, Map.of(taskId, task)); ClusterState state = ClusterState.builder(createClusterState(tasksCustomMetadata)) .nodes( @@ -215,10 +215,10 @@ public void testCheckDatabases_dontCheckDatabaseWhenNoDatabasesIndex() throws Ex String taskId = GeoIpDownloader.GEOIP_DOWNLOADER; PersistentTask task = new PersistentTask<>(taskId, GeoIpDownloader.GEOIP_DOWNLOADER, new GeoIpTaskParams(), 1, null); task = new PersistentTask<>(task, new GeoIpTaskState(Map.of("GeoIP2-City.mmdb", new GeoIpTaskState.Metadata(0L, 0, 9, md5, 10)))); - PersistentTasksCustomMetadata tasksCustomMetadata = new PersistentTasksCustomMetadata(1L, Map.of(taskId, task)); + PersistentTasksMetadataSection tasksCustomMetadata = new PersistentTasksMetadataSection(1L, Map.of(taskId, task)); ClusterState state = ClusterState.builder(new ClusterName("name")) - .metadata(Metadata.builder().putCustom(TYPE, tasksCustomMetadata).build()) + .metadata(Metadata.builder().putSection(TYPE, tasksCustomMetadata).build()) .nodes(new DiscoveryNodes.Builder().add(DiscoveryNodeUtils.create("_id1")).localNodeId("_id1")) .build(); @@ -231,7 +231,7 @@ public void testCheckDatabases_dontCheckDatabaseWhenNoDatabasesIndex() throws Ex } public void testCheckDatabases_dontCheckDatabaseWhenGeoIpDownloadTask() throws Exception { - PersistentTasksCustomMetadata tasksCustomMetadata = new PersistentTasksCustomMetadata(0L, Map.of()); + PersistentTasksMetadataSection tasksCustomMetadata = new PersistentTasksMetadataSection(0L, Map.of()); ClusterState state = createClusterState(tasksCustomMetadata); @@ -349,11 +349,11 @@ private String mockSearches(String databaseName, int firstChunk, int lastChunk) return MessageDigests.toHexString(md.digest()); } - static ClusterState createClusterState(PersistentTasksCustomMetadata tasksCustomMetadata) { + static ClusterState createClusterState(PersistentTasksMetadataSection tasksCustomMetadata) { return createClusterState(tasksCustomMetadata, false); } - static ClusterState createClusterState(PersistentTasksCustomMetadata tasksCustomMetadata, boolean noStartedShards) { + static ClusterState createClusterState(PersistentTasksMetadataSection tasksCustomMetadata, boolean noStartedShards) { boolean aliasGeoipDatabase = randomBoolean(); String indexName = aliasGeoipDatabase ? 
GeoIpDownloader.DATABASES_INDEX + "-" + randomAlphaOfLength(5) @@ -377,7 +377,7 @@ static ClusterState createClusterState(PersistentTasksCustomMetadata tasksCustom shardRouting = shardRouting.moveToStarted(ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE); } return ClusterState.builder(new ClusterName("name")) - .metadata(Metadata.builder().putCustom(TYPE, tasksCustomMetadata).put(idxMeta)) + .metadata(Metadata.builder().putSection(TYPE, tasksCustomMetadata).put(idxMeta)) .nodes(DiscoveryNodes.builder().add(DiscoveryNodeUtils.create("_id1")).localNodeId("_id1")) .routingTable( RoutingTable.builder() diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java index 203ecaea72c0e..9213601e165ca 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/EnterpriseGeoIpDownloaderTests.java @@ -35,7 +35,7 @@ import org.elasticsearch.ingest.EnterpriseGeoIpTask; import org.elasticsearch.ingest.geoip.direct.DatabaseConfiguration; import org.elasticsearch.node.Node; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.test.ESTestCase; @@ -90,7 +90,7 @@ public void setup() throws IOException { when(clusterService.getClusterSettings()).thenReturn( new ClusterSettings(Settings.EMPTY, Set.of(GeoIpDownloaderTaskExecutor.POLL_INTERVAL_SETTING)) ); - ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of())); + ClusterState state = createClusterState(new PersistentTasksMetadataSection(1L, Map.of())); when(clusterService.state()).thenReturn(state); client = new MockClient(threadPool); geoIpDownloader = new EnterpriseGeoIpDownloader( @@ -464,7 +464,7 @@ void deleteOldChunks(String name, int firstChunk) { } public void testUpdateDatabasesWriteBlock() { - ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of())); + ClusterState state = createClusterState(new PersistentTasksMetadataSection(1L, Map.of())); var geoIpIndex = state.getMetadata().getIndicesLookup().get(EnterpriseGeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName(); state = ClusterState.builder(state) .blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)) @@ -486,7 +486,7 @@ public void testUpdateDatabasesWriteBlock() { } public void testUpdateDatabasesIndexNotReady() throws IOException { - ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of()), true); + ClusterState state = createClusterState(new PersistentTasksMetadataSection(1L, Map.of()), true); var geoIpIndex = state.getMetadata().getIndicesLookup().get(EnterpriseGeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName(); state = ClusterState.builder(state) .blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)) diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java index b33b66696cfca..3ceb03b8cc53e 100644 --- 
a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTaskExecutorTests.java @@ -38,7 +38,7 @@ public void testHasAtLeastOneGeoipProcessorWhenDownloadDatabaseOnPipelineCreatio when(clusterState.getMetadata()).thenReturn(metadata); final IngestMetadata[] ingestMetadata = new IngestMetadata[1]; - when(metadata.custom(IngestMetadata.TYPE)).thenAnswer(invocationOnmock -> ingestMetadata[0]); + when(metadata.section(IngestMetadata.TYPE)).thenAnswer(invocationOnmock -> ingestMetadata[0]); final Settings[] indexSettings = new Settings[1]; IndexMetadata indexMetadata = mock(IndexMetadata.class); @@ -68,7 +68,7 @@ public void testHasAtLeastOneGeoipProcessor() throws IOException { final IngestMetadata[] ingestMetadata = new IngestMetadata[1]; ClusterState clusterState = mock(ClusterState.class); Metadata metadata = mock(Metadata.class); - when(metadata.custom(IngestMetadata.TYPE)).thenAnswer(invocationOnmock -> ingestMetadata[0]); + when(metadata.section(IngestMetadata.TYPE)).thenAnswer(invocationOnmock -> ingestMetadata[0]); when(clusterState.getMetadata()).thenReturn(metadata); List expectHitsInputs = getPipelinesWithGeoIpProcessors(true); List expectMissesInputs = getPipelinesWithoutGeoIpProcessors(); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java index 984bd37181fe7..305e62b5ae014 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpDownloaderTests.java @@ -38,8 +38,8 @@ import org.elasticsearch.node.Node; import org.elasticsearch.persistent.PersistentTaskResponse; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import org.elasticsearch.telemetry.metric.MeterRegistry; @@ -104,7 +104,7 @@ public void setup() throws IOException { ) ) ); - ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of())); + ClusterState state = createClusterState(new PersistentTasksMetadataSection(1L, Map.of())); when(clusterService.state()).thenReturn(state); client = new MockClient(threadPool); geoIpDownloader = new GeoIpDownloader( @@ -578,7 +578,7 @@ void processDatabase(Map databaseInfo) { } public void testUpdateDatabasesWriteBlock() { - ClusterState state = createClusterState(new PersistentTasksCustomMetadata(1L, Map.of())); + ClusterState state = createClusterState(new PersistentTasksMetadataSection(1L, Map.of())); var geoIpIndex = state.getMetadata().getIndicesLookup().get(GeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName(); state = ClusterState.builder(state) .blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)) @@ -600,7 +600,7 @@ public void testUpdateDatabasesWriteBlock() { } public void testUpdateDatabasesIndexNotReady() { - ClusterState state = createClusterState(new 
PersistentTasksCustomMetadata(1L, Map.of()), true); + ClusterState state = createClusterState(new PersistentTasksMetadataSection(1L, Map.of()), true); var geoIpIndex = state.getMetadata().getIndicesLookup().get(GeoIpDownloader.DATABASES_INDEX).getWriteIndex().getName(); state = ClusterState.builder(state) .blocks(new ClusterBlocks.Builder().addIndexBlock(geoIpIndex, IndexMetadata.INDEX_READ_ONLY_ALLOW_DELETE_BLOCK)) @@ -632,8 +632,8 @@ public void testThatRunDownloaderDeletesExpiredDatabases() { client.addHandler( UpdatePersistentTaskStatusAction.INSTANCE, (UpdatePersistentTaskStatusAction.Request request, ActionListener taskResponseListener) -> { - PersistentTasksCustomMetadata.Assignment assignment = mock(PersistentTasksCustomMetadata.Assignment.class); - PersistentTasksCustomMetadata.PersistentTask persistentTask = new PersistentTasksCustomMetadata.PersistentTask<>( + PersistentTasksMetadataSection.Assignment assignment = mock(PersistentTasksMetadataSection.Assignment.class); + PersistentTasksMetadataSection.PersistentTask persistentTask = new PersistentTasksMetadataSection.PersistentTask<>( GeoIpDownloader.GEOIP_DOWNLOADER, GeoIpDownloader.GEOIP_DOWNLOADER, new GeoIpTaskParams(), diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java index a0541df0d4d8a..76a129a675b37 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java +++ b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/GeoIpProcessorFactoryTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.ingest.Processor; import org.elasticsearch.ingest.RandomDocumentPicks; import org.elasticsearch.ingest.geoip.Database.Property; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; import org.elasticsearch.threadpool.ThreadPool; @@ -450,12 +450,12 @@ public void testDownloadDatabaseOnPipelineCreation() throws IOException { } public void testDefaultDatabaseWithTaskPresent() throws Exception { - PersistentTasksCustomMetadata tasks = PersistentTasksCustomMetadata.builder() + PersistentTasksMetadataSection tasks = PersistentTasksMetadataSection.builder() .addTask(GeoIpDownloader.GEOIP_DOWNLOADER, GeoIpDownloader.GEOIP_DOWNLOADER, null, null) .updateTaskState(GeoIpDownloader.GEOIP_DOWNLOADER, GeoIpTaskState.EMPTY) .build(); ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasks)) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasks)) .build(); when(clusterService.state()).thenReturn(clusterState); GeoIpProcessor.Factory factory = new GeoIpProcessor.Factory(databaseNodeService); diff --git a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationActionTests.java b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationActionTests.java index 710c3ee23916d..caac834e465bc 100644 --- a/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationActionTests.java +++ 
b/modules/ingest-geoip/src/test/java/org/elasticsearch/ingest/geoip/direct/TransportPutDatabaseConfigurationActionTests.java @@ -23,7 +23,7 @@ public void testValidatePrerequisites() { String name = randomAlphaOfLengthBetween(1, 50); IngestGeoIpMetadata ingestGeoIpMetadata = randomIngestGeoIpMetadata(name); ClusterState state = ClusterState.builder(ClusterState.EMPTY_STATE) - .metadata(Metadata.builder(Metadata.EMPTY_METADATA).putCustom(IngestGeoIpMetadata.TYPE, ingestGeoIpMetadata)) + .metadata(Metadata.builder(Metadata.EMPTY_METADATA).putSection(IngestGeoIpMetadata.TYPE, ingestGeoIpMetadata)) .build(); DatabaseConfiguration databaseConfiguration = randomDatabaseConfiguration(randomIdentifier(), name); expectThrows( diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java index ac850e991296c..40da1374c9d5e 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/FeatureMigrationIT.java @@ -165,7 +165,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { assertThat(innerMap, hasEntry("innerKey", "innerValue")); // We shouldn't have any results in the cluster state as no features have fully finished yet. - FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + FeatureMigrationResults currentResults = clusterState.metadata().section(FeatureMigrationResults.TYPE); assertThat(currentResults, nullValue()); postUpgradeHookCalled.set(true); }); @@ -198,7 +198,7 @@ public void testMigrateInternalManagedSystemIndex() throws Exception { Metadata finalMetadata = clusterAdmin().prepareState().get().getState().metadata(); // Check that the results metadata is what we expect. - FeatureMigrationResults currentResults = finalMetadata.custom(FeatureMigrationResults.TYPE); + FeatureMigrationResults currentResults = finalMetadata.section(FeatureMigrationResults.TYPE); assertThat(currentResults, notNullValue()); assertThat(currentResults.getFeatureStatuses(), allOf(aMapWithSize(1), hasKey(FEATURE_NAME))); assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true)); @@ -276,7 +276,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { ) ); Metadata newMetadata = Metadata.builder(currentState.metadata()) - .putCustom(FeatureMigrationResults.TYPE, newResults) + .putSection(FeatureMigrationResults.TYPE, newResults) .build(); return ClusterState.builder(currentState).metadata(newMetadata).build(); } diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java index 8f9c2b7f34105..955fa75777e2c 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/MultiFeatureMigrationIT.java @@ -110,7 +110,7 @@ public void testMultipleFeatureMigration() throws Exception { metadata.put("stringKey", "first plugin value"); // We shouldn't have any results in the cluster state given no features have finished yet. 
- FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + FeatureMigrationResults currentResults = clusterState.metadata().section(FeatureMigrationResults.TYPE); assertThat(currentResults, nullValue()); preMigrationHookCalled.set(true); @@ -127,7 +127,7 @@ public void testMultipleFeatureMigration() throws Exception { assertThat(metadata, hasEntry("stringKey", "first plugin value")); // We shouldn't have any results in the cluster state given no features have finished yet. - FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + FeatureMigrationResults currentResults = clusterState.metadata().section(FeatureMigrationResults.TYPE); assertThat(currentResults, nullValue()); postMigrationHookCalled.set(true); @@ -144,7 +144,7 @@ public void testMultipleFeatureMigration() throws Exception { metadata.put("stringKey", "second plugin value"); // But now, we should have results, as we're in a new feature! - FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + FeatureMigrationResults currentResults = clusterState.metadata().section(FeatureMigrationResults.TYPE); assertThat(currentResults, notNullValue()); assertThat(currentResults.getFeatureStatuses(), allOf(aMapWithSize(1), hasKey(FEATURE_NAME))); assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true)); @@ -165,7 +165,7 @@ public void testMultipleFeatureMigration() throws Exception { assertThat(metadata, hasEntry("stringKey", "second plugin value")); // And here, the results should be the same, as we haven't updated the state with this feature's status yet. - FeatureMigrationResults currentResults = clusterState.metadata().custom(FeatureMigrationResults.TYPE); + FeatureMigrationResults currentResults = clusterState.metadata().section(FeatureMigrationResults.TYPE); assertThat(currentResults, notNullValue()); assertThat(currentResults.getFeatureStatuses(), allOf(aMapWithSize(1), hasKey(FEATURE_NAME))); assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true)); @@ -205,7 +205,7 @@ public void testMultipleFeatureMigration() throws Exception { Metadata finalMetadata = clusterAdmin().prepareState().get().getState().metadata(); // Check that the results metadata is what we expect - FeatureMigrationResults currentResults = finalMetadata.custom(FeatureMigrationResults.TYPE); + FeatureMigrationResults currentResults = finalMetadata.section(FeatureMigrationResults.TYPE); assertThat(currentResults, notNullValue()); assertThat(currentResults.getFeatureStatuses(), allOf(aMapWithSize(2), hasKey(FEATURE_NAME), hasKey(SECOND_FEATURE_NAME))); assertThat(currentResults.getFeatureStatuses().get(FEATURE_NAME).succeeded(), is(true)); diff --git a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java index 47c6e8faf15bf..419a2fb85ae0c 100644 --- a/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java +++ b/modules/reindex/src/internalClusterTest/java/org/elasticsearch/migration/SystemIndexMigrationIT.java @@ -19,7 +19,7 @@ import org.elasticsearch.cluster.ClusterStateListener; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import 
org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; import org.elasticsearch.test.InternalTestCluster; @@ -66,7 +66,7 @@ public void testSystemIndexMigrationCanBeInterruptedWithShutdown() throws Except createSystemIndexForDescriptor(INTERNAL_MANAGED); final ClusterStateListener clusterStateListener = event -> { - PersistentTasksCustomMetadata.PersistentTask task = PersistentTasksCustomMetadata.getTaskWithId( + PersistentTasksMetadataSection.PersistentTask task = PersistentTasksMetadataSection.getTaskWithId( event.state(), SYSTEM_INDEX_UPGRADE_TASK_NAME ); diff --git a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java index d51ac416d2759..f0f4295b299ae 100644 --- a/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java +++ b/qa/smoke-test-http/src/javaRestTest/java/org/elasticsearch/http/ClusterStateRestCancellationIT.java @@ -40,7 +40,7 @@ public class ClusterStateRestCancellationIT extends HttpSmokeTestCase { @Override protected Collection> nodePlugins() { - return CollectionUtils.appendToCopy(super.nodePlugins(), AssertingCustomPlugin.class); + return CollectionUtils.appendToCopy(super.nodePlugins(), AssertingSectionPlugin.class); } private void updateClusterState(ClusterService clusterService, UnaryOperator updateOperator) { @@ -67,7 +67,10 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) public void testClusterStateRestCancellation() throws Exception { final ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()); - updateClusterState(clusterService, s -> ClusterState.builder(s).putCustom(AssertingCustom.NAME, AssertingCustom.INSTANCE).build()); + updateClusterState( + clusterService, + s -> ClusterState.builder(s).putCustom(AssertingSection.NAME, AssertingSection.INSTANCE).build() + ); final Request clusterStateRequest = new Request(HttpGet.METHOD_NAME, "/_cluster/state"); clusterStateRequest.addParameter("wait_for_metadata_version", Long.toString(Long.MAX_VALUE)); @@ -93,13 +96,13 @@ public void testClusterStateRestCancellation() throws Exception { assertTrue(tasks.toString(), tasks.stream().noneMatch(t -> t.action().equals(ClusterStateAction.NAME))); }); - updateClusterState(clusterService, s -> ClusterState.builder(s).removeCustom(AssertingCustom.NAME).build()); + updateClusterState(clusterService, s -> ClusterState.builder(s).removeCustom(AssertingSection.NAME).build()); } - private static class AssertingCustom implements SimpleDiffable, ClusterState.Custom { + private static class AssertingSection implements SimpleDiffable, ClusterState.Custom { static final String NAME = "asserting"; - static final AssertingCustom INSTANCE = new AssertingCustom(); + static final AssertingSection INSTANCE = new AssertingSection(); @Override public String getWriteableName() { @@ -122,11 +125,11 @@ public Iterator toXContentChunked(ToXContent.Params params } } - public static class AssertingCustomPlugin extends Plugin { + public static class AssertingSectionPlugin extends Plugin { @Override public List getNamedWriteables() { return Collections.singletonList( - new NamedWriteableRegistry.Entry(ClusterState.Custom.class, AssertingCustom.NAME, in -> AssertingCustom.INSTANCE) + new 
NamedWriteableRegistry.Entry(ClusterState.Custom.class, AssertingSection.NAME, in -> AssertingSection.INSTANCE) ); } } diff --git a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java index 32d8be475dbbe..1210398c46b24 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/action/admin/cluster/node/tasks/TasksIT.java @@ -38,7 +38,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.health.node.selection.HealthNode; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.RemovedTaskListener; @@ -722,12 +722,12 @@ public void testTasksGetWaitForNoTask() throws Exception { public void testTasksWaitForAllTask() throws Exception { // Find tasks that are not expected to complete and identify the nodes running them - List> alwaysRunningTasks = findTasks( + List> alwaysRunningTasks = findTasks( clusterService().state(), HealthNode.TASK_NAME ); Set nodesRunningTasks = alwaysRunningTasks.stream() - .map(PersistentTasksCustomMetadata.PersistentTask::getExecutorNode) + .map(PersistentTasksMetadataSection.PersistentTask::getExecutorNode) .collect(Collectors.toSet()); // Spin up a request to wait for all tasks in the cluster to make sure it doesn't cause an infinite loop ListTasksResponse response = clusterAdmin().prepareListTasks().setWaitForCompletion(true).setTimeout(timeValueSeconds(1)).get(); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java index ffa2cd29778ad..42bc4e827e48d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/ClusterStateDiffIT.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; @@ -660,16 +661,16 @@ private AliasMetadata randomAlias() { * Randomly adds, deletes or updates repositories in the metadata */ private Metadata randomMetadataCustoms(final Metadata metadata) { - return randomParts(metadata, "custom", new RandomPart() { + return randomParts(metadata, "custom", new RandomPart() { @Override - public Map parts(Metadata metadata) { + public Map parts(Metadata metadata) { return metadata.customs(); } @Override - public Metadata.Builder put(Metadata.Builder builder, Metadata.Custom part) { - return builder.putCustom(part.getWriteableName(), part); + public Metadata.Builder put(Metadata.Builder builder, MetadataSection part) { + return builder.putSection(part.getWriteableName(), part); } @Override @@ -678,12 +679,12 @@ public Metadata.Builder remove(Metadata.Builder builder, String name) { // there must always be at least an 
empty graveyard return builder.indexGraveyard(IndexGraveyard.builder().build()); } else { - return builder.removeCustom(name); + return builder.removeSection(name); } } @Override - public Metadata.Custom randomCreate(String name) { + public MetadataSection randomCreate(String name) { if (randomBoolean()) { return new RepositoriesMetadata(Collections.emptyList()); } else { @@ -692,7 +693,7 @@ public Metadata.Custom randomCreate(String name) { } @Override - public Metadata.Custom randomChange(Metadata.Custom part) { + public MetadataSection randomChange(MetadataSection part) { return part; } }); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java index 3dba41adec08b..6140ffafe84dc 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/cluster/SimpleClusterStateIT.java @@ -64,7 +64,7 @@ public class SimpleClusterStateIT extends ESIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singletonList(PrivateCustomPlugin.class); + return Collections.singletonList(PrivateSectionPlugin.class); } @Before @@ -334,15 +334,15 @@ public void testPrivateCustomsAreExcluded() throws Exception { assertFalse(clusterStateResponse.getState().customs().containsKey("test")); } - private static class TestCustom extends AbstractNamedDiffable implements ClusterState.Custom { + private static class TestSection extends AbstractNamedDiffable implements ClusterState.Custom { private final int value; - TestCustom(int value) { + TestSection(int value) { this.value = value; } - TestCustom(StreamInput in) throws IOException { + TestSection(StreamInput in) throws IOException { this.value = in.readInt(); } @@ -376,15 +376,15 @@ public boolean isPrivate() { } } - public static class PrivateCustomPlugin extends Plugin implements ClusterPlugin { + public static class PrivateSectionPlugin extends Plugin implements ClusterPlugin { - public PrivateCustomPlugin() {} + public PrivateSectionPlugin() {} @Override public List getNamedWriteables() { List entries = new ArrayList<>(); - entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, "test", TestCustom::new)); - entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, "test", TestCustom::readDiffFrom)); + entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, "test", TestSection::new)); + entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, "test", TestSection::readDiffFrom)); return entries; } @@ -411,7 +411,7 @@ public Collection createComponents(PluginServices services) { public ClusterState execute(ClusterState currentState) { if (currentState.custom("test") == null) { final ClusterState.Builder builder = ClusterState.builder(currentState); - builder.putCustom("test", new TestCustom(42)); + builder.putCustom("test", new TestSection(42)); return builder.build(); } else { return currentState; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java index e05bda69d2c9c..144b131b90f42 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/gateway/GatewayIndexStateIT.java @@ -561,7 +561,7 @@ public void testHalfDeletedIndexImport() 
throws Exception { // term in the coordination metadata .coordinationMetadata(CoordinationMetadata.builder(metadata.coordinationMetadata()).term(0L).build()) // add a tombstone but do not delete the index metadata from disk - .putCustom(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metadata.index("test").getIndex()).build()) + .putSection(IndexGraveyard.TYPE, IndexGraveyard.builder().addTombstone(metadata.index("test").getIndex()).build()) .build() ); NodeMetadata.FORMAT.writeAndCleanup(new NodeMetadata(nodeId, BuildVersion.current(), metadata.oldestIndexVersion()), paths); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java index ec193a37eeab7..1d00dceea9a52 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTaskInitializationFailureIT.java @@ -43,7 +43,7 @@ protected Collection> nodePlugins() { public void testPersistentTasksThatFailDuringInitializationAreRemovedFromClusterState() throws Exception { PersistentTasksService persistentTasksService = internalCluster().getInstance(PersistentTasksService.class); - PlainActionFuture> startPersistentTaskFuture = + PlainActionFuture> startPersistentTaskFuture = new PlainActionFuture<>(); persistentTasksService.sendStartRequest( UUIDs.base64UUID(), @@ -56,7 +56,7 @@ public void testPersistentTasksThatFailDuringInitializationAreRemovedFromCluster assertBusy(() -> { final ClusterService clusterService = internalCluster().getAnyMasterNodeInstance(ClusterService.class); - List> tasks = findTasks( + List> tasks = findTasks( clusterService.state(), FailingInitializationPersistentTaskExecutor.TASK_NAME ); @@ -141,7 +141,7 @@ protected AllocatedPersistentTask createTask( String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask taskInProgress, + PersistentTasksMetadataSection.PersistentTask taskInProgress, Map headers ) { return new AllocatedPersistentTask(id, type, action, "", parentTaskId, headers) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java index 73c9495a2cd2f..9c85b6636fcff 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorFullRestartIT.java @@ -9,7 +9,7 @@ import org.elasticsearch.action.support.PlainActionFuture; import org.elasticsearch.common.UUIDs; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.plugins.Plugin; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java index d71718f3f3a6b..e5c3a0450b48c 100644 --- 
a/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/PersistentTasksExecutorIT.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.persistent.PersistentTasksService.WaitForPersistentTaskListener; import org.elasticsearch.persistent.TestPersistentTasksPlugin.State; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; diff --git a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java index e7d23f97fc992..2361f7509e9ac 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/persistent/decider/EnableAssignmentDeciderIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.persistent.TestPersistentTasksPlugin; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; @@ -58,7 +58,7 @@ public void testEnableAssignmentAfterRestart() throws Exception { latch.await(); ClusterService clusterService = internalCluster().clusterService(internalCluster().getMasterName()); - PersistentTasksCustomMetadata tasks = clusterService.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterService.state().getMetadata().section(PersistentTasksMetadataSection.TYPE); assertEquals(numberOfTasks, tasks.tasks().stream().filter(t -> TestPersistentTasksExecutor.NAME.equals(t.getTaskName())).count()); logger.trace("waiting for the tasks to be running"); @@ -79,7 +79,7 @@ public void testEnableAssignmentAfterRestart() throws Exception { assertEnableAssignmentSetting(Allocation.NONE); logger.trace("persistent tasks are not assigned"); - tasks = internalCluster().clusterService().state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + tasks = internalCluster().clusterService().state().getMetadata().section(PersistentTasksMetadataSection.TYPE); assertEquals( numberOfTasks, tasks.tasks() diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java index abcac0cade456..824e88c429957 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CorruptedBlobStoreRepositoryIT.java @@ -212,7 +212,7 @@ public void testFindDanglingLatestGeneration() throws Exception { currentState -> ClusterState.builder(currentState) .metadata( Metadata.builder(currentState.getMetadata()) - .putCustom( + .putSection( 
RepositoriesMetadata.TYPE, RepositoriesMetadata.get(currentState) .withUpdatedGeneration(repository.getMetadata().name(), beforeMoveGen, beforeMoveGen + 1) diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java index 041d722591391..157adf6c57074 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/CustomMetadataContextIT.java @@ -11,12 +11,13 @@ import org.elasticsearch.action.ActionRequestBuilder; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.CheckedFunction; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.repositories.RepositoryMissingException; -import org.elasticsearch.test.TestCustomMetadata; +import org.elasticsearch.test.TestMetadataSection; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; import org.elasticsearch.xcontent.XContentParser; @@ -38,7 +39,7 @@ public class CustomMetadataContextIT extends AbstractSnapshotIntegTestCase { @Override protected Collection> nodePlugins() { - return Collections.singletonList(TestCustomMetadataPlugin.class); + return Collections.singletonList(TestMetadataSectionPlugin.class); } public void testShouldNotRestoreRepositoryMetadata() { @@ -80,9 +81,9 @@ public void testShouldRestoreOnlySnapshotMetadata() throws Exception { boolean isSnapshotMetadataSet = randomBoolean(); updateClusterState(currentState -> currentState.copyAndUpdateMetadata(metadataBuilder -> { if (isSnapshotMetadataSet) { - metadataBuilder.putCustom(SnapshotMetadata.TYPE, new SnapshotMetadata("before_snapshot_s")); + metadataBuilder.putSection(SnapshotMetadata.TYPE, new SnapshotMetadata("before_snapshot_s")); } - metadataBuilder.putCustom(ApiMetadata.TYPE, new ApiMetadata("before_snapshot_ns")); + metadataBuilder.putSection(ApiMetadata.TYPE, new ApiMetadata("before_snapshot_ns")); })); logger.info("create snapshot"); @@ -92,11 +93,11 @@ public void testShouldRestoreOnlySnapshotMetadata() throws Exception { logger.info("update custom persistent metadata"); updateClusterState(currentState -> currentState.copyAndUpdateMetadata(metadataBuilder -> { if (isSnapshotMetadataSet == false || randomBoolean()) { - metadataBuilder.putCustom(SnapshotMetadata.TYPE, new SnapshotMetadata("after_snapshot_s")); + metadataBuilder.putSection(SnapshotMetadata.TYPE, new SnapshotMetadata("after_snapshot_s")); } else { - metadataBuilder.removeCustom(SnapshotMetadata.TYPE); + metadataBuilder.removeSection(SnapshotMetadata.TYPE); } - metadataBuilder.putCustom(ApiMetadata.TYPE, new ApiMetadata("after_snapshot_ns")); + metadataBuilder.putSection(ApiMetadata.TYPE, new ApiMetadata("after_snapshot_ns")); })); logger.info("restore snapshot"); @@ -109,18 +110,18 @@ public void testShouldRestoreOnlySnapshotMetadata() throws Exception { var metadata = clusterAdmin().prepareState().get().getState().getMetadata(); logger.info("check that custom persistent metadata [{}] is correctly restored", metadata); if (isSnapshotMetadataSet) { - assertThat(metadata.custom(SnapshotMetadata.TYPE).getData(), equalTo("before_snapshot_s")); + 
assertThat(metadata.section(SnapshotMetadata.TYPE).getData(), equalTo("before_snapshot_s")); } else { - assertThat(metadata.custom(SnapshotMetadata.TYPE), nullValue()); + assertThat(metadata.section(SnapshotMetadata.TYPE), nullValue()); } - assertThat(metadata.custom(ApiMetadata.TYPE).getData(), equalTo("after_snapshot_ns")); + assertThat(metadata.section(ApiMetadata.TYPE).getData(), equalTo("after_snapshot_ns")); } public void testShouldKeepGatewayMetadataAfterRestart() throws Exception { logger.info("add custom gateway metadata"); updateClusterState(currentState -> currentState.copyAndUpdateMetadata(metadataBuilder -> { - metadataBuilder.putCustom(GatewayMetadata.TYPE, new GatewayMetadata("before_restart_s_gw")); - metadataBuilder.putCustom(ApiMetadata.TYPE, new ApiMetadata("before_restart_ns")); + metadataBuilder.putSection(GatewayMetadata.TYPE, new GatewayMetadata("before_restart_s_gw")); + metadataBuilder.putSection(ApiMetadata.TYPE, new ApiMetadata("before_restart_ns")); })); logger.info("restart all nodes"); @@ -129,45 +130,45 @@ public void testShouldKeepGatewayMetadataAfterRestart() throws Exception { var metadata = clusterAdmin().prepareState().get().getState().getMetadata(); logger.info("check that gateway custom metadata [{}] survived full cluster restart", metadata); - assertThat(metadata.custom(GatewayMetadata.TYPE).getData(), equalTo("before_restart_s_gw")); - assertThat(metadata.custom(ApiMetadata.TYPE), nullValue()); + assertThat(metadata.section(GatewayMetadata.TYPE).getData(), equalTo("before_restart_s_gw")); + assertThat(metadata.section(ApiMetadata.TYPE), nullValue()); } public void testShouldExposeApiMetadata() throws Exception { logger.info("add custom api metadata"); updateClusterState(currentState -> currentState.copyAndUpdateMetadata(metadataBuilder -> { - metadataBuilder.putCustom(ApiMetadata.TYPE, new ApiMetadata("before_restart_s_gw")); - metadataBuilder.putCustom(NonApiMetadata.TYPE, new NonApiMetadata("before_restart_ns")); + metadataBuilder.putSection(ApiMetadata.TYPE, new ApiMetadata("before_restart_s_gw")); + metadataBuilder.putSection(NonApiMetadata.TYPE, new NonApiMetadata("before_restart_ns")); })); var metadata = clusterAdmin().prepareState().get().getState().getMetadata(); logger.info("check that api custom metadata [{}] is visible via api", metadata); - assertThat(metadata.custom(ApiMetadata.TYPE).getData(), equalTo("before_restart_s_gw")); - assertThat(metadata.custom(NonApiMetadata.TYPE), nullValue()); + assertThat(metadata.section(ApiMetadata.TYPE).getData(), equalTo("before_restart_s_gw")); + assertThat(metadata.section(NonApiMetadata.TYPE), nullValue()); } - public static class TestCustomMetadataPlugin extends Plugin { + public static class TestMetadataSectionPlugin extends Plugin { private final List namedWritables = new ArrayList<>(); private final List namedXContents = new ArrayList<>(); - public TestCustomMetadataPlugin() { + public TestMetadataSectionPlugin() { registerBuiltinWritables(); } - private void registerMetadataCustom( + private void registerMetadataSection( String name, Writeable.Reader reader, Writeable.Reader> diffReader, CheckedFunction parser ) { - namedWritables.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, name, reader)); + namedWritables.add(new NamedWriteableRegistry.Entry(MetadataSection.class, name, reader)); namedWritables.add(new NamedWriteableRegistry.Entry(NamedDiff.class, name, diffReader)); - namedXContents.add(new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(name), parser)); + 
namedXContents.add(new NamedXContentRegistry.Entry(MetadataSection.class, new ParseField(name), parser)); } private void registerBuiltinWritables() { - Map.>of( + Map.>of( SnapshotMetadata.TYPE, SnapshotMetadata::new, GatewayMetadata.TYPE, @@ -178,11 +179,11 @@ private void registerBuiltinWritables() { NonApiMetadata::new ) .forEach( - (type, constructor) -> registerMetadataCustom( + (type, constructor) -> registerMetadataSection( type, - in -> TestCustomMetadata.readFrom(constructor, in), - in -> TestCustomMetadata.readDiffFrom(type, in), - parser -> TestCustomMetadata.fromXContent(constructor, parser) + in -> TestMetadataSection.readFrom(constructor, in), + in -> TestMetadataSection.readDiffFrom(type, in), + parser -> TestMetadataSection.fromXContent(constructor, parser) ) ); } @@ -198,11 +199,11 @@ public List getNamedXContent() { } } - private abstract static class ThisTestCustomMetadata extends TestCustomMetadata { + private abstract static class ThisTestMetadataSection extends TestMetadataSection { private final String type; private final EnumSet context; - ThisTestCustomMetadata(String data, String type, EnumSet context) { + ThisTestMetadataSection(String data, String type, EnumSet context) { super(data); this.type = type; this.context = context; @@ -224,7 +225,7 @@ public EnumSet context() { } } - private static class SnapshotMetadata extends ThisTestCustomMetadata { + private static class SnapshotMetadata extends ThisTestMetadataSection { public static final String TYPE = "test_metadata_scope_snapshot"; SnapshotMetadata(String data) { @@ -232,7 +233,7 @@ private static class SnapshotMetadata extends ThisTestCustomMetadata { } } - private static class GatewayMetadata extends ThisTestCustomMetadata { + private static class GatewayMetadata extends ThisTestMetadataSection { public static final String TYPE = "test_metadata_scope_gateway"; GatewayMetadata(String data) { @@ -240,7 +241,7 @@ private static class GatewayMetadata extends ThisTestCustomMetadata { } } - private static class ApiMetadata extends ThisTestCustomMetadata { + private static class ApiMetadata extends ThisTestMetadataSection { public static final String TYPE = "test_metadata_scope_api"; ApiMetadata(String data) { @@ -248,7 +249,7 @@ private static class ApiMetadata extends ThisTestCustomMetadata { } } - private static class NonApiMetadata extends ThisTestCustomMetadata { + private static class NonApiMetadata extends ThisTestMetadataSection { public static final String TYPE = "test_metadata_scope_non_api"; NonApiMetadata(String data) { diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java index d4c0a4c80a3b5..106567b16bd03 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/RepositoriesIT.java @@ -77,7 +77,7 @@ public void testRepositoryCreation() throws Exception { logger.info("--> check that repository is really there"); ClusterStateResponse clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get(); Metadata metadata = clusterStateResponse.getState().getMetadata(); - RepositoriesMetadata repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE); + RepositoriesMetadata repositoriesMetadata = metadata.section(RepositoriesMetadata.TYPE); assertThat(repositoriesMetadata, notNullValue()); 
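        // (Editor's sketch, not part of the patch.) The lookup keeps its shape after the rename;
        // only custom(...) becomes section(...), returning the registered section type or null when
        // absent. Assuming the two-argument default overload shown in the Metadata.java hunks below
        // and a RepositoriesMetadata.EMPTY constant, a null-safe variant of the lookup above would be:
        //     RepositoriesMetadata reposOrEmpty = metadata.section(RepositoriesMetadata.TYPE, RepositoriesMetadata.EMPTY);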
assertThat(repositoriesMetadata.repository("test-repo-1"), notNullValue()); assertThat(repositoriesMetadata.repository("test-repo-1").type(), equalTo("fs")); @@ -88,7 +88,7 @@ public void testRepositoryCreation() throws Exception { logger.info("--> check that both repositories are in cluster state"); clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get(); metadata = clusterStateResponse.getState().getMetadata(); - repositoriesMetadata = metadata.custom(RepositoriesMetadata.TYPE); + repositoriesMetadata = metadata.section(RepositoriesMetadata.TYPE); assertThat(repositoriesMetadata, notNullValue()); assertThat(repositoriesMetadata.repositories().size(), equalTo(2)); assertThat(repositoriesMetadata.repository("test-repo-1"), notNullValue()); diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java index 2d1e16dc64273..bbaa28035875d 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotShutdownIT.java @@ -498,7 +498,7 @@ private static void putShutdownMetadata( public ClusterState execute(ClusterState currentState) { final var nodeId = currentState.nodes().resolveNode(nodeName).getId(); return currentState.copyAndUpdateMetadata( - mdb -> mdb.putCustom( + mdb -> mdb.putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(Map.of(nodeId, shutdownMetadataBuilder.setNodeId(nodeId).build())) ) @@ -521,7 +521,7 @@ private static void clearShutdownMetadata(ClusterService clusterService) { safeAwait(listener -> clusterService.submitUnbatchedStateUpdateTask("remove restart marker", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - return currentState.copyAndUpdateMetadata(mdb -> mdb.putCustom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY)); + return currentState.copyAndUpdateMetadata(mdb -> mdb.putSection(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY)); } @Override diff --git a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java index 9c9076dff00e2..66dbff15a02cb 100644 --- a/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java +++ b/server/src/internalClusterTest/java/org/elasticsearch/snapshots/SnapshotStressTestsIT.java @@ -1225,7 +1225,7 @@ public ClusterState execute(ClusterState currentState) { ); final var nodeId = currentState.nodes().resolveNode(node.nodeName).getId(); return currentState.copyAndUpdateMetadata( - mdb -> mdb.putCustom( + mdb -> mdb.putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Map.of( @@ -1262,7 +1262,7 @@ public void clusterStateProcessed(ClusterState initialState, ClusterState newSta @Override public ClusterState execute(ClusterState currentState) { return currentState.copyAndUpdateMetadata( - mdb -> mdb.putCustom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY) + mdb -> mdb.putSection(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java index 
11bdd41f458d3..e6fd805af77f8 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportDeleteDesiredNodesAction.java @@ -97,7 +97,7 @@ public void taskSucceeded(DeleteDesiredNodesTask task, Void unused) { @Override public ClusterState afterBatchExecution(ClusterState clusterState, boolean clusterStateChanged) { - return clusterState.copyAndUpdateMetadata(metadata -> metadata.removeCustom(DesiredNodesMetadata.TYPE)); + return clusterState.copyAndUpdateMetadata(metadata -> metadata.removeSection(DesiredNodesMetadata.TYPE)); } } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java index 9ec8feeb5d405..83ba2a9be523d 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/desirednodes/TransportUpdateDesiredNodesAction.java @@ -116,7 +116,7 @@ protected void doExecute(Task task, UpdateDesiredNodesRequest request, ActionLis static ClusterState replaceDesiredNodes(ClusterState clusterState, DesiredNodes newDesiredNodes) { return clusterState.copyAndUpdateMetadata( - metadata -> metadata.putCustom(DesiredNodesMetadata.TYPE, new DesiredNodesMetadata(newDesiredNodes)) + metadata -> metadata.putSection(DesiredNodesMetadata.TYPE, new DesiredNodesMetadata(newDesiredNodes)) ); } diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java index ae538e7c72334..8dd1adbebf749 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/migration/TransportGetFeatureUpgradeStatusAction.java @@ -23,7 +23,7 @@ import org.elasticsearch.index.IndexVersions; import org.elasticsearch.indices.SystemIndices; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -103,7 +103,7 @@ protected void masterOperation( .map(feature -> getFeatureUpgradeStatus(state, feature)) .toList(); - boolean migrationTaskExists = PersistentTasksCustomMetadata.getTaskWithId(state, SYSTEM_INDEX_UPGRADE_TASK_NAME) != null; + boolean migrationTaskExists = PersistentTasksMetadataSection.getTaskWithId(state, SYSTEM_INDEX_UPGRADE_TASK_NAME) != null; GetFeatureUpgradeStatusResponse.UpgradeStatus initalStatus = migrationTaskExists ? 
IN_PROGRESS : NO_MIGRATION_NEEDED; GetFeatureUpgradeStatusResponse.UpgradeStatus status = Stream.concat( @@ -120,7 +120,7 @@ protected void masterOperation( static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus getFeatureUpgradeStatus(ClusterState state, SystemIndices.Feature feature) { String featureName = feature.getName(); - PersistentTasksCustomMetadata.PersistentTask migrationTask = PersistentTasksCustomMetadata + PersistentTasksMetadataSection.PersistentTask migrationTask = PersistentTasksMetadataSection .getTaskWithId(state, SYSTEM_INDEX_UPGRADE_TASK_NAME); final String currentFeature = Optional.ofNullable(migrationTask) .map(task -> task.getState()) @@ -155,7 +155,7 @@ static GetFeatureUpgradeStatusResponse.FeatureUpgradeStatus getFeatureUpgradeSta // visible for testing static List getIndexInfos(ClusterState state, SystemIndices.Feature feature) { final SingleFeatureMigrationResult featureStatus = Optional.ofNullable( - (FeatureMigrationResults) state.metadata().custom(FeatureMigrationResults.TYPE) + (FeatureMigrationResults) state.metadata().section(FeatureMigrationResults.TYPE) ).map(FeatureMigrationResults::getFeatureStatuses).map(results -> results.get(feature.getName())).orElse(null); final String failedFeatureName = featureStatus == null ? null : featureStatus.getFailedIndexName(); diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java index 0e5d7cda4a0d4..b00fde105a793 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/state/TransportClusterStateAction.java @@ -24,7 +24,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.Metadata.Custom; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.version.CompatibilityVersions; @@ -224,9 +224,9 @@ private ClusterStateResponse buildResponse(final ClusterStateRequest request, fi } // filter out metadata that shouldn't be returned by the API - for (Map.Entry custom : currentState.metadata().customs().entrySet()) { + for (Map.Entry custom : currentState.metadata().customs().entrySet()) { if (custom.getValue().context().contains(Metadata.XContentContext.API) == false) { - mdBuilder.removeCustom(custom.getKey()); + mdBuilder.removeSection(custom.getKey()); } } } diff --git a/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java b/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java index aca9bb81fb53f..4d1e96838ed6f 100644 --- a/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java +++ b/server/src/main/java/org/elasticsearch/action/ingest/ReservedPipelineAction.java @@ -70,10 +70,10 @@ private Collection prepare(List requests private static ClusterState wrapIngestTaskExecute(IngestService.PipelineClusterStateUpdateTask task, ClusterState state) { final var allIndexMetadata = state.metadata().indices().values(); - final IngestMetadata currentIndexMetadata = state.metadata().custom(IngestMetadata.TYPE); + final IngestMetadata currentIndexMetadata = 
state.metadata().section(IngestMetadata.TYPE); var updatedIngestMetadata = task.execute(currentIndexMetadata, allIndexMetadata); - return state.copyAndUpdateMetadata(b -> b.putCustom(IngestMetadata.TYPE, updatedIngestMetadata)); + return state.copyAndUpdateMetadata(b -> b.putSection(IngestMetadata.TYPE, updatedIngestMetadata)); } @Override diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java index 0392ca2e6581a..c77b9f3906f9d 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterChangedEvent.java @@ -12,6 +12,7 @@ import org.elasticsearch.cluster.metadata.IndexGraveyard.IndexGraveyardDiff; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.gateway.GatewayService; import org.elasticsearch.index.Index; @@ -121,10 +122,10 @@ public boolean metadataChanged() { */ public Set changedCustomMetadataSet() { Set result = new HashSet<>(); - Map currentCustoms = state.metadata().customs(); - Map previousCustoms = previousState.metadata().customs(); + Map currentCustoms = state.metadata().customs(); + Map previousCustoms = previousState.metadata().customs(); if (currentCustoms.equals(previousCustoms) == false) { - for (Map.Entry currentCustomMetadata : currentCustoms.entrySet()) { + for (Map.Entry currentCustomMetadata : currentCustoms.entrySet()) { // new custom md added or existing custom md changed if (previousCustoms.containsKey(currentCustomMetadata.getKey()) == false || currentCustomMetadata.getValue().equals(previousCustoms.get(currentCustomMetadata.getKey())) == false) { @@ -132,7 +133,7 @@ public Set changedCustomMetadataSet() { } } // existing custom md deleted - for (Map.Entry previousCustomMetadata : previousCustoms.entrySet()) { + for (Map.Entry previousCustomMetadata : previousCustoms.entrySet()) { if (currentCustoms.containsKey(previousCustomMetadata.getKey()) == false) { result.add(previousCustomMetadata.getKey()); } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 3fba3a7bdbe13..8d6735d38222b 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -17,12 +17,12 @@ import org.elasticsearch.cluster.metadata.DesiredNodesMetadata; import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.MetadataDeleteIndexService; import org.elasticsearch.cluster.metadata.MetadataIndexAliasesService; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.metadata.MetadataIndexTemplateService; import org.elasticsearch.cluster.metadata.MetadataMappingService; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.metadata.NodesShutdownMetadata; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.routing.DelayedAllocationService; @@ -72,7 +72,7 @@ import org.elasticsearch.indices.SystemIndices; import 
org.elasticsearch.ingest.IngestMetadata; import org.elasticsearch.injection.guice.AbstractModule; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksNodeService; import org.elasticsearch.plugins.ClusterPlugin; import org.elasticsearch.script.ScriptMetadata; @@ -215,9 +215,9 @@ public static List getNamedWriteables() { registerMetadataCustom(entries, IndexGraveyard.TYPE, IndexGraveyard::new, IndexGraveyard::readDiffFrom); registerMetadataCustom( entries, - PersistentTasksCustomMetadata.TYPE, - PersistentTasksCustomMetadata::new, - PersistentTasksCustomMetadata::readDiffFrom + PersistentTasksMetadataSection.TYPE, + PersistentTasksMetadataSection::new, + PersistentTasksMetadataSection::readDiffFrom ); registerMetadataCustom( entries, @@ -256,58 +256,58 @@ public static List getNamedXWriteables() { // Metadata entries.add( new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(RepositoriesMetadata.TYPE), RepositoriesMetadata::fromXContent ) ); entries.add( - new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(IngestMetadata.TYPE), IngestMetadata::fromXContent) + new NamedXContentRegistry.Entry(MetadataSection.class, new ParseField(IngestMetadata.TYPE), IngestMetadata::fromXContent) ); entries.add( - new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(ScriptMetadata.TYPE), ScriptMetadata::fromXContent) + new NamedXContentRegistry.Entry(MetadataSection.class, new ParseField(ScriptMetadata.TYPE), ScriptMetadata::fromXContent) ); entries.add( - new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(IndexGraveyard.TYPE), IndexGraveyard::fromXContent) + new NamedXContentRegistry.Entry(MetadataSection.class, new ParseField(IndexGraveyard.TYPE), IndexGraveyard::fromXContent) ); entries.add( new NamedXContentRegistry.Entry( - Metadata.Custom.class, - new ParseField(PersistentTasksCustomMetadata.TYPE), - PersistentTasksCustomMetadata::fromXContent + MetadataSection.class, + new ParseField(PersistentTasksMetadataSection.TYPE), + PersistentTasksMetadataSection::fromXContent ) ); entries.add( new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(ComponentTemplateMetadata.TYPE), ComponentTemplateMetadata::fromXContent ) ); entries.add( new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(ComposableIndexTemplateMetadata.TYPE), ComposableIndexTemplateMetadata::fromXContent ) ); entries.add( new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(DataStreamMetadata.TYPE), DataStreamMetadata::fromXContent ) ); entries.add( new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(NodesShutdownMetadata.TYPE), NodesShutdownMetadata::fromXContent ) ); entries.add( new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(DesiredNodesMetadata.TYPE), DesiredNodesMetadata::fromXContent ) @@ -324,13 +324,13 @@ private static void registerClusterCustom( registerCustom(entries, ClusterState.Custom.class, name, reader, diffReader); } - private static void registerMetadataCustom( + private static void registerMetadataCustom( List entries, String name, Reader reader, Reader> diffReader ) { - registerCustom(entries, Metadata.Custom.class, name, reader, diffReader); + registerCustom(entries, 
MetadataSection.class, name, reader, diffReader); } private static void registerCustom( diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java index c54269da68507..a6e8b35a0b964 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterState.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterState.java @@ -21,6 +21,7 @@ import org.elasticsearch.cluster.coordination.NoMasterBlockService; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RoutingNodes; @@ -512,10 +513,10 @@ public String toString() { } if (metadata.customs().isEmpty() == false) { sb.append("metadata customs:\n"); - for (final Map.Entry cursor : metadata.customs().entrySet()) { + for (final Map.Entry cursor : metadata.customs().entrySet()) { final String type = cursor.getKey(); - final Metadata.Custom custom = cursor.getValue(); - sb.append(TAB).append(type).append(": ").append(custom); + final MetadataSection section = cursor.getValue(); + sb.append(TAB).append(type).append(": ").append(section); } sb.append("\n"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java index 82abf4b4c7d5d..a4e5b34314847 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommand.java @@ -27,6 +27,7 @@ import org.elasticsearch.cluster.metadata.ComposableIndexTemplateMetadata; import org.elasticsearch.cluster.metadata.DataStreamMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.cli.EnvironmentAwareCommand; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamOutput; @@ -68,7 +69,7 @@ public abstract class ElasticsearchNodeCommand extends EnvironmentAwareCommand { @Override public T parseNamedObject(Class categoryClass, String name, XContentParser parser, C context) throws IOException { // Currently, two unknown top-level objects are present - if (Metadata.Custom.class.isAssignableFrom(categoryClass)) { + if (MetadataSection.class.isAssignableFrom(categoryClass)) { if (DataStreamMetadata.TYPE.equals(name) || ComposableIndexTemplateMetadata.TYPE.equals(name) || ComponentTemplateMetadata.TYPE.equals(name)) { @@ -78,7 +79,7 @@ public T parseNamedObject(Class categoryClass, String name, XContentPa // TODO: Try to parse other named objects (e.g. stored scripts, ingest pipelines) that are part of core es as well? // Note that supporting PersistentTasksCustomMetadata is trickier, because PersistentTaskParams is a named object too. 
} else { - return (T) new UnknownMetadataCustom(name, parser.mapOrdered()); + return (T) new UnknownMetadataSection(name, parser.mapOrdered()); } } if (Condition.class.isAssignableFrom(categoryClass)) { @@ -201,7 +202,7 @@ OptionParser getParser() { return parser; } - public record UnknownMetadataCustom(String name, Map contents) implements Metadata.Custom { + public record UnknownMetadataSection(String name, Map contents) implements MetadataSection { @Override public EnumSet context() { @@ -209,7 +210,7 @@ public EnumSet context() { } @Override - public Diff diff(Metadata.Custom previousState) { + public Diff diff(MetadataSection previousState) { assert false; throw new UnsupportedOperationException(); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java index 9223e02fc946c..aa8f5918dcd91 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeJoinExecutor.java @@ -30,7 +30,7 @@ import org.elasticsearch.features.FeatureService; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.IndexVersions; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import java.util.ArrayList; import java.util.Collection; @@ -335,7 +335,7 @@ protected ClusterState.Builder becomeMasterAndTrimConflictingNodes( .build(); logger.trace("becomeMasterAndTrimConflictingNodes: {}", tmpState.nodes()); allocationService.cleanCaches(); - tmpState = PersistentTasksCustomMetadata.disassociateDeadNodes(tmpState); + tmpState = PersistentTasksMetadataSection.disassociateDeadNodes(tmpState); tmpState = maybeReconfigureAfterMasterElection.apply(tmpState); return ClusterState.builder(allocationService.disassociateDeadNodes(tmpState, false, "removed dead nodes on election")); } diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java index 5cbe742aec628..a92720bed1043 100644 --- a/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/NodeLeftExecutor.java @@ -18,7 +18,7 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.MasterService; import org.elasticsearch.cluster.version.CompatibilityVersions; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import java.util.HashMap; import java.util.Map; @@ -93,7 +93,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionContext) t .nodeFeatures(nodeFeatures) .build(); remainingNodesClusterState(remainingNodesClusterState); - final var ptasksDisassociatedState = PersistentTasksCustomMetadata.disassociateDeadNodes(remainingNodesClusterState); + final var ptasksDisassociatedState = PersistentTasksMetadataSection.disassociateDeadNodes(remainingNodesClusterState); return allocationService.disassociateDeadNodes( ptasksDisassociatedState, true, diff --git a/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommand.java b/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommand.java index 77ed4a5d52ef1..a3b5669404df6 100644 --- 
a/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommand.java +++ b/server/src/main/java/org/elasticsearch/cluster/coordination/RemoveCustomsCommand.java @@ -61,7 +61,7 @@ protected void processDataPaths(Terminal terminal, Path[] dataPaths, OptionSet o boolean matched = false; for (String customKey : oldClusterState.metadata().customs().keySet()) { if (Regex.simpleMatch(customToRemove, customKey)) { - metadataBuilder.removeCustom(customKey); + metadataBuilder.removeSection(customKey); if (matched == false) { terminal.println("The following customs will be removed:"); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java index 71966deb076b6..3add83c001b4e 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComponentTemplateMetadata.java @@ -33,7 +33,7 @@ * {@link ComponentTemplateMetadata} is a custom {@link Metadata} implementation for storing a map * of component templates and their names. */ -public class ComponentTemplateMetadata implements Metadata.Custom { +public class ComponentTemplateMetadata implements MetadataSection { public static final String TYPE = "component_template"; private static final ParseField COMPONENT_TEMPLATE = new ParseField("component_template"); @SuppressWarnings("unchecked") @@ -68,11 +68,11 @@ public Map componentTemplates() { } @Override - public Diff diff(Metadata.Custom before) { + public Diff diff(MetadataSection before) { return new ComponentTemplateMetadataDiff((ComponentTemplateMetadata) before, this); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { return new ComponentTemplateMetadataDiff(in); } @@ -127,7 +127,7 @@ public String toString() { return Strings.toString(this); } - static class ComponentTemplateMetadataDiff implements NamedDiff { + static class ComponentTemplateMetadataDiff implements NamedDiff { final Diff> componentTemplateDiff; @@ -149,7 +149,7 @@ static class ComponentTemplateMetadataDiff implements NamedDiff } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { return new ComponentTemplateMetadata(componentTemplateDiff.apply(((ComponentTemplateMetadata) part).componentTemplates)); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java index 8fd606048d539..161a55a2a0846 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/ComposableIndexTemplateMetadata.java @@ -30,10 +30,10 @@ import java.util.Objects; /** - * The {@link ComposableIndexTemplateMetadata} class is a custom {@link Metadata.Custom} implementation that + * The {@link ComposableIndexTemplateMetadata} class is a custom {@link MetadataSection} implementation that * stores a map of ids to {@link ComposableIndexTemplate} templates. 
*/ -public class ComposableIndexTemplateMetadata implements Metadata.Custom { +public class ComposableIndexTemplateMetadata implements MetadataSection { public static final String TYPE = "index_template"; private static final ParseField INDEX_TEMPLATE = new ParseField("index_template"); @SuppressWarnings("unchecked") @@ -78,11 +78,11 @@ public EnumSet context() { } @Override - public Diff diff(Metadata.Custom before) { + public Diff diff(MetadataSection before) { return new ComposableIndexTemplateMetadataDiff((ComposableIndexTemplateMetadata) before, this); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { return new ComposableIndexTemplateMetadataDiff(in); } @@ -128,7 +128,7 @@ public String toString() { return Strings.toString(this); } - static class ComposableIndexTemplateMetadataDiff implements NamedDiff { + static class ComposableIndexTemplateMetadataDiff implements NamedDiff { final Diff> indexTemplateDiff; @@ -150,7 +150,7 @@ static class ComposableIndexTemplateMetadataDiff implements NamedDiff getDataStreamAliases() { } @Override - public Diff diff(Metadata.Custom before) { + public Diff diff(MetadataSection before) { return new DataStreamMetadata.DataStreamMetadataDiff((DataStreamMetadata) before, this); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { return new DataStreamMetadata.DataStreamMetadataDiff(in); } @@ -262,7 +262,7 @@ public String toString() { return Strings.toString(this); } - static class DataStreamMetadataDiff implements NamedDiff { + static class DataStreamMetadataDiff implements NamedDiff { private static final DiffableUtils.DiffableValueReader DS_DIFF_READER = new DiffableUtils.DiffableValueReader<>( DataStream::read, @@ -294,7 +294,7 @@ static class DataStreamMetadataDiff implements NamedDiff { } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { return new DataStreamMetadata( dataStreamDiff.apply(((DataStreamMetadata) part).dataStreams), dataStreamAliasDiff.apply(((DataStreamMetadata) part).dataStreamAliases) diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java index e572f20557c79..1bcfeffb66432 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodes.java @@ -350,7 +350,7 @@ public static ClusterState updateDesiredNodesStatusIfNeeded(ClusterState cluster return desiredNodes == updatedDesiredNodes ? 
clusterState : clusterState.copyAndUpdateMetadata( - metadata -> metadata.putCustom(DesiredNodesMetadata.TYPE, new DesiredNodesMetadata(updatedDesiredNodes)) + metadata -> metadata.putSection(DesiredNodesMetadata.TYPE, new DesiredNodesMetadata(updatedDesiredNodes)) ); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadata.java index ab5c324150f2b..f727eff008ef7 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadata.java @@ -27,7 +27,7 @@ import java.util.Iterator; import java.util.Objects; -public class DesiredNodesMetadata extends AbstractNamedDiffable implements Metadata.Custom { +public class DesiredNodesMetadata extends AbstractNamedDiffable implements MetadataSection { private static final TransportVersion MIN_SUPPORTED_VERSION = TransportVersions.V_8_1_0; public static final String TYPE = "desired_nodes"; @@ -61,8 +61,8 @@ public void writeTo(StreamOutput out) throws IOException { latestDesiredNodes.writeTo(out); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { - return readDiffFrom(Metadata.Custom.class, TYPE, in); + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(MetadataSection.class, TYPE, in); } public static DesiredNodesMetadata fromXContent(XContentParser parser) throws IOException { @@ -75,7 +75,7 @@ public Iterator toXContentChunked(ToXContent.Params ignore } public static DesiredNodesMetadata fromClusterState(ClusterState clusterState) { - return clusterState.metadata().custom(TYPE, EMPTY); + return clusterState.metadata().section(TYPE, EMPTY); } @Nullable diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java index 140eeb2e240ff..405b7f7f9807f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/IndexGraveyard.java @@ -48,7 +48,7 @@ * tombstones remain in the cluster state for a fixed period of time, after which * they are purged. */ -public final class IndexGraveyard implements Metadata.Custom { +public final class IndexGraveyard implements MetadataSection { /** * Setting for the maximum tombstones allowed in the cluster state; @@ -146,11 +146,11 @@ public void writeTo(final StreamOutput out) throws IOException { } @Override - public Diff diff(final Metadata.Custom previous) { + public Diff diff(final MetadataSection previous) { return new IndexGraveyardDiff((IndexGraveyard) previous, this); } - public static NamedDiff readDiffFrom(final StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(final StreamInput in) throws IOException { return new IndexGraveyardDiff(in); } @@ -250,7 +250,7 @@ public IndexGraveyard build(final Settings settings) { /** * A class representing a diff of two IndexGraveyard objects. 
*/ - public static final class IndexGraveyardDiff implements NamedDiff { + public static final class IndexGraveyardDiff implements NamedDiff { private final List added; private final int removedCount; @@ -304,7 +304,7 @@ public void writeTo(final StreamOutput out) throws IOException { } @Override - public IndexGraveyard apply(final Metadata.Custom previous) { + public IndexGraveyard apply(final MetadataSection previous) { final IndexGraveyard old = (IndexGraveyard) previous; if (removedCount > old.tombstones.size()) { throw new IllegalStateException( diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java index 905c3078b3c9c..53cb5d9a22b4c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/Metadata.java @@ -17,7 +17,6 @@ import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.Diffable; import org.elasticsearch.cluster.DiffableUtils; -import org.elasticsearch.cluster.NamedDiffable; import org.elasticsearch.cluster.NamedDiffableValueSerializer; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.block.ClusterBlock; @@ -56,7 +55,6 @@ import org.elasticsearch.rest.RestStatus; import org.elasticsearch.transport.Transports; import org.elasticsearch.xcontent.NamedObjectNotFoundException; -import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.XContentParser; @@ -139,22 +137,6 @@ public enum XContentContext { */ public static EnumSet ALL_CONTEXTS = EnumSet.allOf(XContentContext.class); - /** - * Custom metadata that persists (via XContent) across restarts. The deserialization method for each implementation must be registered - * with the {@link NamedXContentRegistry}. - */ - public interface Custom extends NamedDiffable, ChunkedToXContent { - - EnumSet context(); - - /** - * @return true if this custom could be restored from snapshot - */ - default boolean isRestorable() { - return context().contains(XContentContext.SNAPSHOT); - } - } - public static final Setting SETTING_READ_ONLY_SETTING = Setting.boolSetting( "cluster.blocks.read_only", false, @@ -202,7 +184,9 @@ default boolean isRestorable() { public static final String DEDUPLICATED_MAPPINGS_PARAM = "deduplicated_mappings"; public static final String GLOBAL_STATE_FILE_PREFIX = "global-"; - private static final NamedDiffableValueSerializer CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>(Custom.class); + private static final NamedDiffableValueSerializer CUSTOM_VALUE_SERIALIZER = new NamedDiffableValueSerializer<>( + MetadataSection.class + ); private final String clusterUUID; private final boolean clusterUUIDCommitted; @@ -217,7 +201,7 @@ default boolean isRestorable() { private final ImmutableOpenMap indices; private final ImmutableOpenMap> aliasedIndices; private final ImmutableOpenMap templates; - private final ImmutableOpenMap customs; + private final ImmutableOpenMap sections; private final Map reservedStateMetadata; private final transient int totalNumberOfShards; // Transient ? not serializable anyway? 
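/*
 * (Editor's note, not part of the patch.) The hunk above removes the nested Metadata.Custom
 * interface, but the new top-level MetadataSection type that replaces it is not shown in this
 * excerpt. A minimal sketch of what the extracted interface presumably looks like, reconstructed
 * from the removed body; the package, imports, and exact generic bounds are assumptions (the
 * stripped type parameters are restored here as NamedDiffable<MetadataSection> and
 * EnumSet<Metadata.XContentContext>):
 */
package org.elasticsearch.cluster.metadata;

import org.elasticsearch.cluster.NamedDiffable;
import org.elasticsearch.common.xcontent.ChunkedToXContent;

import java.util.EnumSet;

/**
 * A named metadata section that persists (via XContent) across restarts. The deserialization
 * method for each implementation must be registered with the NamedXContentRegistry.
 */
public interface MetadataSection extends NamedDiffable<MetadataSection>, ChunkedToXContent {

    EnumSet<Metadata.XContentContext> context();

    /**
     * @return true if this section could be restored from snapshot
     */
    default boolean isRestorable() {
        return context().contains(Metadata.XContentContext.SNAPSHOT);
    }
}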
@@ -259,7 +243,7 @@ private Metadata( ImmutableOpenMap indices, ImmutableOpenMap> aliasedIndices, ImmutableOpenMap templates, - ImmutableOpenMap customs, + ImmutableOpenMap sections, String[] allIndices, String[] visibleIndices, String[] allOpenIndices, @@ -281,7 +265,7 @@ private Metadata( this.hashesOfConsistentSettings = hashesOfConsistentSettings; this.indices = indices; this.aliasedIndices = aliasedIndices; - this.customs = customs; + this.sections = sections; this.templates = templates; this.totalNumberOfShards = totalNumberOfShards; this.totalOpenIndexShards = totalOpenIndexShards; @@ -300,7 +284,7 @@ private Metadata( private boolean assertConsistent() { final var lookup = this.indicesLookup; - final var dsMetadata = custom(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY); + final var dsMetadata = section(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY); assert lookup == null || lookup.equals(Builder.buildIndicesLookup(dsMetadata, indices)); try { Builder.ensureNoNameCollisions(aliasedIndices.keySet(), indices, dsMetadata); @@ -339,7 +323,7 @@ public Metadata withIncrementedVersion() { indices, aliasedIndices, templates, - customs, + sections, allIndices, visibleIndices, allOpenIndices, @@ -400,7 +384,7 @@ public Metadata withLifecycleState(final Index index, final LifecycleExecutionSt builder.build(), aliasedIndices, templates, - customs, + sections, allIndices, visibleIndices, allOpenIndices, @@ -440,7 +424,7 @@ public Metadata withIndexSettingsUpdates(final Map updates) { builder.build(), aliasedIndices, templates, - customs, + sections, allIndices, visibleIndices, allOpenIndices, @@ -469,7 +453,7 @@ public Metadata withCoordinationMetadata(CoordinationMetadata coordinationMetada indices, aliasedIndices, templates, - customs, + sections, allIndices, visibleIndices, allOpenIndices, @@ -505,7 +489,7 @@ public Metadata withLastCommittedValues( indices, aliasedIndices, templates, - customs, + sections, allIndices, visibleIndices, allOpenIndices, @@ -546,7 +530,7 @@ public Metadata withAllocationAndTermUpdatesOnly(Map upda updatedIndicesBuilder.build(), aliasedIndices, templates, - customs, + sections, allIndices, visibleIndices, allOpenIndices, @@ -641,7 +625,7 @@ public Metadata withAddedIndex(IndexMetadata index) { indicesMap, updatedAliases, templates, - customs, + sections, updatedAllIndices, updatedVisibleIndices, updatedOpenIndices, @@ -779,7 +763,7 @@ private synchronized SortedMap buildIndicesLookup() { if (i != null) { return i; } - i = Builder.buildIndicesLookup(custom(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY), indices); + i = Builder.buildIndicesLookup(section(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY), indices); indicesLookup = i; return i; } @@ -1295,13 +1279,13 @@ public Map getTemplates() { } public Map componentTemplates() { - return Optional.ofNullable((ComponentTemplateMetadata) this.custom(ComponentTemplateMetadata.TYPE)) + return Optional.ofNullable((ComponentTemplateMetadata) this.section(ComponentTemplateMetadata.TYPE)) .map(ComponentTemplateMetadata::componentTemplates) .orElse(Collections.emptyMap()); } public Map templatesV2() { - return Optional.ofNullable((ComposableIndexTemplateMetadata) this.custom(ComposableIndexTemplateMetadata.TYPE)) + return Optional.ofNullable((ComposableIndexTemplateMetadata) this.section(ComposableIndexTemplateMetadata.TYPE)) .map(ComposableIndexTemplateMetadata::indexTemplates) .orElse(Collections.emptyMap()); } @@ -1330,11 +1314,11 @@ public boolean isTimeSeriesTemplate(ComposableIndexTemplate indexTemplate) 
{ } public Map dataStreams() { - return this.custom(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY).dataStreams(); + return this.section(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY).dataStreams(); } public Map dataStreamAliases() { - return this.custom(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY).getDataStreamAliases(); + return this.section(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY).getDataStreamAliases(); } /** @@ -1357,7 +1341,7 @@ public Map> dataStreamAliasesByDataStream() { } public NodesShutdownMetadata nodeShutdowns() { - return custom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY); + return section(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY); } /** @@ -1386,8 +1370,8 @@ public boolean isIndexManagedByILM(IndexMetadata indexMetadata) { return true; } - public Map customs() { - return this.customs; + public Map customs() { + return this.sections; } /** @@ -1403,17 +1387,17 @@ public Map reservedStateMetadata() { * The collection of index deletions in the cluster. */ public IndexGraveyard indexGraveyard() { - return custom(IndexGraveyard.TYPE); + return section(IndexGraveyard.TYPE); } @SuppressWarnings("unchecked") - public T custom(String type) { - return (T) customs.get(type); + public T section(String type) { + return (T) sections.get(type); } @SuppressWarnings("unchecked") - public T custom(String type, T defaultValue) { - return (T) customs.getOrDefault(type, defaultValue); + public T section(String type, T defaultValue) { + return (T) sections.getOrDefault(type, defaultValue); } /** @@ -1468,17 +1452,17 @@ public static boolean isGlobalStateEquals(Metadata metadata1, Metadata metadata2 } // Check if any persistent metadata needs to be saved int customCount1 = 0; - for (Map.Entry cursor : metadata1.customs.entrySet()) { + for (Map.Entry cursor : metadata1.sections.entrySet()) { if (cursor.getValue().context().contains(XContentContext.GATEWAY)) { - if (cursor.getValue().equals(metadata2.custom(cursor.getKey())) == false) { + if (cursor.getValue().equals(metadata2.section(cursor.getKey())) == false) { return false; } customCount1++; } } int customCount2 = 0; - for (Custom custom : metadata2.customs.values()) { - if (custom.context().contains(XContentContext.GATEWAY)) { + for (MetadataSection section : metadata2.sections.values()) { + if (section.context().contains(XContentContext.GATEWAY)) { customCount2++; } } @@ -1543,7 +1527,7 @@ public Iterator toXContentChunked(ToXContent.Params p) { ), indices, Iterators.flatMap( - customs.entrySet().iterator(), + sections.entrySet().iterator(), entry -> entry.getValue().context().contains(context) ? 
ChunkedToXContentHelper.wrapWithObject(entry.getKey(), entry.getValue().toXContentChunked(p)) : Collections.emptyIterator() @@ -1572,7 +1556,7 @@ private static class MetadataDiff implements Diff { private final Diff hashesOfConsistentSettings; private final Diff> indices; private final Diff> templates; - private final Diff> customs; + private final Diff> customs; private final Diff> reservedStateMetadata; /** @@ -1599,8 +1583,8 @@ private static class MetadataDiff implements Diff { indices = DiffableUtils.diff(before.indices, after.indices, DiffableUtils.getStringKeySerializer()); templates = DiffableUtils.diff(before.templates, after.templates, DiffableUtils.getStringKeySerializer()); customs = DiffableUtils.diff( - before.customs, - after.customs, + before.sections, + after.sections, DiffableUtils.getStringKeySerializer(), CUSTOM_VALUE_SERIALIZER ); @@ -1693,10 +1677,10 @@ public Metadata apply(Metadata part) { builder.hashesOfConsistentSettings(hashesOfConsistentSettings.apply(part.hashesOfConsistentSettings)); builder.indices(updatedIndices); builder.templates(templates.apply(part.templates)); - builder.customs(customs.apply(part.customs)); + builder.sections(customs.apply(part.sections)); builder.put(reservedStateMetadata.apply(part.reservedStateMetadata)); if (part.indices == updatedIndices - && builder.dataStreamMetadata() == part.custom(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY)) { + && builder.dataStreamMetadata() == part.section(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY)) { builder.previousIndicesLookup = part.indicesLookup; } return builder.build(true); @@ -1737,8 +1721,8 @@ public static Metadata readFrom(StreamInput in) throws IOException { } int customSize = in.readVInt(); for (int i = 0; i < customSize; i++) { - Custom customIndexMetadata = in.readNamedWriteable(Custom.class); - builder.putCustom(customIndexMetadata.getWriteableName(), customIndexMetadata); + MetadataSection section = in.readNamedWriteable(MetadataSection.class); + builder.putSection(section.getWriteableName(), section); } if (in.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { int reservedStateSize = in.readVInt(); @@ -1771,7 +1755,7 @@ public void writeTo(StreamOutput out) throws IOException { indexMetadata.writeTo(out, writeMappingsHash); } out.writeCollection(templates.values()); - VersionedNamedWriteable.writeVersionedWritables(out, customs); + VersionedNamedWriteable.writeVersionedWritables(out, sections); if (out.getTransportVersion().onOrAfter(TransportVersions.V_8_4_0)) { out.writeCollection(reservedStateMetadata.values()); } @@ -1805,7 +1789,7 @@ public static class Builder { private final ImmutableOpenMap.Builder indices; private final ImmutableOpenMap.Builder> aliasedIndices; private final ImmutableOpenMap.Builder templates; - private final ImmutableOpenMap.Builder customs; + private final ImmutableOpenMap.Builder sections; private SortedMap previousIndicesLookup; @@ -1835,7 +1819,7 @@ public Builder() { this.indices = ImmutableOpenMap.builder(metadata.indices); this.aliasedIndices = ImmutableOpenMap.builder(metadata.aliasedIndices); this.templates = ImmutableOpenMap.builder(metadata.templates); - this.customs = ImmutableOpenMap.builder(metadata.customs); + this.sections = ImmutableOpenMap.builder(metadata.sections); this.previousIndicesLookup = metadata.indicesLookup; this.mappingsByHash = new HashMap<>(metadata.mappingsByHash); this.checkForUnusedMappings = false; @@ -1848,7 +1832,7 @@ private Builder(Map mappingsByHash, int indexCountHint) indices = 
ImmutableOpenMap.builder(indexCountHint); aliasedIndices = ImmutableOpenMap.builder(); templates = ImmutableOpenMap.builder(); - customs = ImmutableOpenMap.builder(); + sections = ImmutableOpenMap.builder(); reservedStateMetadata = new HashMap<>(); indexGraveyard(IndexGraveyard.builder().build()); // create new empty index graveyard to initialize previousIndicesLookup = null; @@ -2067,30 +2051,30 @@ public Builder put(String name, ComponentTemplate componentTemplate) { Objects.requireNonNull(componentTemplate, "it is invalid to add a null component template: " + name); // ಠ_ಠ at ImmutableOpenMap Map existingTemplates = Optional.ofNullable( - (ComponentTemplateMetadata) this.customs.get(ComponentTemplateMetadata.TYPE) + (ComponentTemplateMetadata) this.sections.get(ComponentTemplateMetadata.TYPE) ).map(ctm -> new HashMap<>(ctm.componentTemplates())).orElse(new HashMap<>()); existingTemplates.put(name, componentTemplate); - this.customs.put(ComponentTemplateMetadata.TYPE, new ComponentTemplateMetadata(existingTemplates)); + this.sections.put(ComponentTemplateMetadata.TYPE, new ComponentTemplateMetadata(existingTemplates)); return this; } public Builder removeComponentTemplate(String name) { // ಠ_ಠ at ImmutableOpenMap Map existingTemplates = Optional.ofNullable( - (ComponentTemplateMetadata) this.customs.get(ComponentTemplateMetadata.TYPE) + (ComponentTemplateMetadata) this.sections.get(ComponentTemplateMetadata.TYPE) ).map(ctm -> new HashMap<>(ctm.componentTemplates())).orElse(new HashMap<>()); existingTemplates.remove(name); - this.customs.put(ComponentTemplateMetadata.TYPE, new ComponentTemplateMetadata(existingTemplates)); + this.sections.put(ComponentTemplateMetadata.TYPE, new ComponentTemplateMetadata(existingTemplates)); return this; } public Builder componentTemplates(Map componentTemplates) { - this.customs.put(ComponentTemplateMetadata.TYPE, new ComponentTemplateMetadata(componentTemplates)); + this.sections.put(ComponentTemplateMetadata.TYPE, new ComponentTemplateMetadata(componentTemplates)); return this; } public Builder indexTemplates(Map indexTemplates) { - this.customs.put(ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(indexTemplates)); + this.sections.put(ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(indexTemplates)); return this; } @@ -2098,20 +2082,20 @@ public Builder put(String name, ComposableIndexTemplate indexTemplate) { Objects.requireNonNull(indexTemplate, "it is invalid to add a null index template: " + name); // ಠ_ಠ at ImmutableOpenMap Map existingTemplates = Optional.ofNullable( - (ComposableIndexTemplateMetadata) this.customs.get(ComposableIndexTemplateMetadata.TYPE) + (ComposableIndexTemplateMetadata) this.sections.get(ComposableIndexTemplateMetadata.TYPE) ).map(itmd -> new HashMap<>(itmd.indexTemplates())).orElse(new HashMap<>()); existingTemplates.put(name, indexTemplate); - this.customs.put(ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(existingTemplates)); + this.sections.put(ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(existingTemplates)); return this; } public Builder removeIndexTemplate(String name) { // ಠ_ಠ at ImmutableOpenMap Map existingTemplates = Optional.ofNullable( - (ComposableIndexTemplateMetadata) this.customs.get(ComposableIndexTemplateMetadata.TYPE) + (ComposableIndexTemplateMetadata) this.sections.get(ComposableIndexTemplateMetadata.TYPE) ).map(itmd -> new HashMap<>(itmd.indexTemplates())).orElse(new HashMap<>()); 
existingTemplates.remove(name); - this.customs.put(ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(existingTemplates)); + this.sections.put(ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(existingTemplates)); return this; } @@ -2127,7 +2111,7 @@ public Builder dataStreams(Map dataStreams, Mapbuilder().putAllFromMap(dataStreams).build(), @@ -2147,12 +2131,12 @@ public Builder put(DataStream dataStream) { // trigger this validation on each new Metadata creation, even if there are no changes to data streams. dataStream.validate(indices::get); - this.customs.put(DataStreamMetadata.TYPE, dataStreamMetadata().withAddedDatastream(dataStream)); + this.sections.put(DataStreamMetadata.TYPE, dataStreamMetadata().withAddedDatastream(dataStream)); return this; } public DataStreamMetadata dataStreamMetadata() { - return (DataStreamMetadata) this.customs.getOrDefault(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY); + return (DataStreamMetadata) this.sections.getOrDefault(DataStreamMetadata.TYPE, DataStreamMetadata.EMPTY); } public boolean put(String aliasName, String dataStream, Boolean isWriteDataStream, String filter) { @@ -2162,13 +2146,13 @@ public boolean put(String aliasName, String dataStream, Boolean isWriteDataStrea if (existing == updated) { return false; } - this.customs.put(DataStreamMetadata.TYPE, updated); + this.sections.put(DataStreamMetadata.TYPE, updated); return true; } public Builder removeDataStream(String name) { previousIndicesLookup = null; - this.customs.put(DataStreamMetadata.TYPE, dataStreamMetadata().withRemovedDataStream(name)); + this.sections.put(DataStreamMetadata.TYPE, dataStreamMetadata().withRemovedDataStream(name)); return this; } @@ -2180,32 +2164,32 @@ public boolean removeDataStreamAlias(String aliasName, String dataStreamName, bo if (existing == updated) { return false; } - this.customs.put(DataStreamMetadata.TYPE, updated); + this.sections.put(DataStreamMetadata.TYPE, updated); return true; } - public Custom getCustom(String type) { - return customs.get(type); + public MetadataSection getSection(String type) { + return sections.get(type); } - public Builder putCustom(String type, Custom custom) { - customs.put(type, Objects.requireNonNull(custom, type)); + public Builder putSection(String type, MetadataSection section) { + sections.put(type, Objects.requireNonNull(section, type)); return this; } - public Builder removeCustom(String type) { - customs.remove(type); + public Builder removeSection(String type) { + sections.remove(type); return this; } - public Builder removeCustomIf(BiPredicate p) { - customs.removeAll(p); + public Builder removeSectionIf(BiPredicate p) { + sections.removeAll(p); return this; } - public Builder customs(Map customs) { - customs.forEach((key, value) -> Objects.requireNonNull(value, key)); - this.customs.putAllFromMap(customs); + public Builder sections(Map sections) { + sections.forEach((key, value) -> Objects.requireNonNull(value, key)); + this.sections.putAllFromMap(sections); return this; } @@ -2240,12 +2224,12 @@ public Builder removeReservedState(ReservedStateMetadata metadata) { } public Builder indexGraveyard(final IndexGraveyard indexGraveyard) { - putCustom(IndexGraveyard.TYPE, indexGraveyard); + putSection(IndexGraveyard.TYPE, indexGraveyard); return this; } public IndexGraveyard indexGraveyard() { - return (IndexGraveyard) getCustom(IndexGraveyard.TYPE); + return (IndexGraveyard) getSection(IndexGraveyard.TYPE); } public Builder updateSettings(Settings settings, String... 
indices) { @@ -2442,7 +2426,7 @@ public Metadata build(boolean skipNameCollisionChecks) { indicesMap, aliasedIndices, templates.build(), - customs.build(), + sections.build(), allIndicesArray, visibleIndicesArray, allOpenIndicesArray, @@ -2786,8 +2770,8 @@ public static Metadata fromXContent(XContentParser parser) throws IOException { } } else { try { - Custom custom = parser.namedObject(Custom.class, currentFieldName, null); - builder.putCustom(custom.getWriteableName(), custom); + MetadataSection section = parser.namedObject(MetadataSection.class, currentFieldName, null); + builder.putSection(section.getWriteableName(), section); } catch (NamedObjectNotFoundException ex) { logger.warn("Skipping unknown custom object with type {}", currentFieldName); parser.skipChildren(); diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java index c6eb56926eca0..420e9fd889e21 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateService.java @@ -734,7 +734,7 @@ private void validateIndexTemplateV2(String name, ComposableIndexTemplate indexT if (templateToValidate.isDeprecated() == false) { validateUseOfDeprecatedComponentTemplates(name, templateToValidate, currentState.metadata().componentTemplates()); - validateUseOfDeprecatedIngestPipelines(name, currentState.metadata().custom(IngestMetadata.TYPE), combinedSettings); + validateUseOfDeprecatedIngestPipelines(name, currentState.metadata().section(IngestMetadata.TYPE), combinedSettings); // TODO come up with a plan how to validate usage of deprecated ILM policies // we don't have access to the core/main plugin here so we can't use the IndexLifecycleMetadata type } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataSection.java b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataSection.java new file mode 100644 index 0000000000000..1295327199b17 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/MetadataSection.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License + * 2.0 and the Server Side Public License, v 1; you may not use this file except + * in compliance with, at your election, the Elastic License 2.0 or the Server + * Side Public License, v 1. + */ + +package org.elasticsearch.cluster.metadata; + +import org.elasticsearch.cluster.NamedDiffable; +import org.elasticsearch.common.xcontent.ChunkedToXContent; +import org.elasticsearch.xcontent.NamedXContentRegistry; + +import java.util.EnumSet; + +/** + * Custom metadata that persists (via XContent) across restarts. The deserialization method for each implementation must be registered + * with the {@link NamedXContentRegistry}. 
+ */ +public interface MetadataSection extends NamedDiffable, ChunkedToXContent { + + EnumSet context(); + + /** + * @return true if this custom could be restored from snapshot + */ + default boolean isRestorable() { + return context().contains(Metadata.XContentContext.SNAPSHOT); + } +} diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java index 38eefa4085527..94d7eca9efcec 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadata.java @@ -40,7 +40,7 @@ * * Stored in the cluster state as custom metadata. */ -public class NodesShutdownMetadata implements Metadata.Custom { +public class NodesShutdownMetadata implements MetadataSection { public static final String TYPE = "node_shutdown"; public static final TransportVersion NODE_SHUTDOWN_VERSION = TransportVersions.V_7_13_0; public static final NodesShutdownMetadata EMPTY = new NodesShutdownMetadata(Map.of()); @@ -64,7 +64,7 @@ public static NodesShutdownMetadata fromXContent(XContentParser parser) { return PARSER.apply(parser, null); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { return new NodeShutdownMetadataDiff(in); } @@ -156,7 +156,7 @@ public NodesShutdownMetadata removeSingleNodeMetadata(String nodeId) { } @Override - public Diff diff(Metadata.Custom previousState) { + public Diff diff(MetadataSection previousState) { return new NodeShutdownMetadataDiff((NodesShutdownMetadata) previousState, this); } @@ -196,7 +196,7 @@ public Iterator toXContentChunked(ToXContent.Params ignore /** * Handles diffing and appling diffs for {@link NodesShutdownMetadata} as necessary for the cluster state infrastructure. 
*/ - public static class NodeShutdownMetadataDiff implements NamedDiff { + public static class NodeShutdownMetadataDiff implements NamedDiff { private final Diff> nodesDiff; @@ -214,7 +214,7 @@ public NodeShutdownMetadataDiff(StreamInput in) throws IOException { } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { TreeMap newNodes = new TreeMap<>(nodesDiff.apply(((NodesShutdownMetadata) part).nodes)); return new NodesShutdownMetadata(newNodes); } diff --git a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java index 9b07fbadb2328..677c59e817584 100644 --- a/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java +++ b/server/src/main/java/org/elasticsearch/cluster/metadata/RepositoriesMetadata.java @@ -14,7 +14,6 @@ import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; -import org.elasticsearch.cluster.metadata.Metadata.Custom; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -37,7 +36,7 @@ /** * Contains metadata about registered snapshot repositories */ -public class RepositoriesMetadata extends AbstractNamedDiffable implements Custom { +public class RepositoriesMetadata extends AbstractNamedDiffable implements MetadataSection { public static final String TYPE = "repositories"; @@ -52,7 +51,7 @@ public class RepositoriesMetadata extends AbstractNamedDiffable implemen private final List repositories; public static RepositoriesMetadata get(ClusterState state) { - return state.metadata().custom(TYPE, EMPTY); + return state.metadata().section(TYPE, EMPTY); } /** @@ -182,8 +181,8 @@ public RepositoriesMetadata(StreamInput in) throws IOException { this.repositories = in.readCollectionAsImmutableList(RepositoryMetadata::new); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { - return readDiffFrom(Custom.class, TYPE, in); + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(MetadataSection.class, TYPE, in); } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java index 8fb91d89417e0..879375f52761f 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorService.java @@ -142,7 +142,7 @@ public ShardAllocationStatus createNewStatus(Metadata metadata) { @Override public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { var state = clusterService.state(); - var shutdown = state.getMetadata().custom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY); + var shutdown = state.getMetadata().section(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY); var status = createNewStatus(state.getMetadata()); updateShardAllocationStatus(status, state, shutdown, verbose); return createIndicator( diff --git a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java 
b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java index 1db0ec7346a32..9ac3bc4ff14fa 100644 --- a/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java +++ b/server/src/main/java/org/elasticsearch/gateway/MetaStateService.java @@ -125,7 +125,7 @@ private Tuple loadFullStateBWC() throws IOException { final IndexGraveyard indexGraveyard; if (globalMetadata != null) { metadataBuilder = Metadata.builder(globalMetadata); - indexGraveyard = globalMetadata.custom(IndexGraveyard.TYPE); + indexGraveyard = globalMetadata.section(IndexGraveyard.TYPE); } else { metadataBuilder = Metadata.builder(); indexGraveyard = IndexGraveyard.builder().build(); diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNode.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNode.java index f563e2b27ddd7..7b1adcfbe161f 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNode.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNode.java @@ -12,7 +12,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.core.Nullable; import org.elasticsearch.persistent.AllocatedPersistentTask; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.TaskId; import java.util.Map; @@ -34,14 +34,14 @@ protected void onCancelled() { } @Nullable - public static PersistentTasksCustomMetadata.PersistentTask findTask(ClusterState clusterState) { - PersistentTasksCustomMetadata taskMetadata = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + public static PersistentTasksMetadataSection.PersistentTask findTask(ClusterState clusterState) { + PersistentTasksMetadataSection taskMetadata = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); return taskMetadata == null ? 
null : taskMetadata.getTask(TASK_NAME); } @Nullable public static DiscoveryNode findHealthNode(ClusterState clusterState) { - PersistentTasksCustomMetadata.PersistentTask task = findTask(clusterState); + PersistentTasksMetadataSection.PersistentTask task = findTask(clusterState); if (task == null || task.isAssigned() == false) { return null; } diff --git a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java index cc908cd7cad2c..93abf916a248e 100644 --- a/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java +++ b/server/src/main/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutor.java @@ -27,8 +27,8 @@ import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -131,7 +131,7 @@ protected HealthNode createTask( String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask taskInProgress, + PersistentTasksMetadataSection.PersistentTask taskInProgress, Map headers ) { return new HealthNode(id, type, action, getDescription(taskInProgress), parentTaskId, headers); @@ -141,7 +141,7 @@ protected HealthNode createTask( * Returns the node id from the eligible health nodes */ @Override - public PersistentTasksCustomMetadata.Assignment getAssignment( + public PersistentTasksMetadataSection.Assignment getAssignment( HealthNodeTaskParams params, Collection candidateNodes, ClusterState clusterState @@ -150,7 +150,7 @@ public PersistentTasksCustomMetadata.Assignment getAssignment( if (discoveryNode == null) { return NO_NODE_FOUND; } else { - return new PersistentTasksCustomMetadata.Assignment(discoveryNode.getId(), ""); + return new PersistentTasksMetadataSection.Assignment(discoveryNode.getId(), ""); } } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java b/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java index f3b778388809f..d4d9702597b19 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestMetadata.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.util.Maps; @@ -34,7 +35,7 @@ /** * Holds the ingest pipelines that are available in the cluster */ -public final class IngestMetadata implements Metadata.Custom { +public final class IngestMetadata implements MetadataSection { public static final String TYPE = "ingest"; private static final ParseField PIPELINES_FIELD = new ParseField("pipeline"); @@ -107,15 +108,15 @@ public EnumSet context() { } @Override - public Diff diff(Metadata.Custom before) { + public Diff diff(MetadataSection before) { return new IngestMetadataDiff((IngestMetadata) before, this); 
} - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { return new IngestMetadataDiff(in); } - static class IngestMetadataDiff implements NamedDiff { + static class IngestMetadataDiff implements NamedDiff { final Diff> pipelines; @@ -133,7 +134,7 @@ static class IngestMetadataDiff implements NamedDiff { } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { return new IngestMetadata(pipelines.apply(((IngestMetadata) part).pipelines)); } diff --git a/server/src/main/java/org/elasticsearch/ingest/IngestService.java b/server/src/main/java/org/elasticsearch/ingest/IngestService.java index 0b1a135a17214..914a9566c51b6 100644 --- a/server/src/main/java/org/elasticsearch/ingest/IngestService.java +++ b/server/src/main/java/org/elasticsearch/ingest/IngestService.java @@ -142,7 +142,7 @@ public static MatcherWatchdog createGrokThreadWatchdog(Environment env, ThreadPo */ static final ClusterStateTaskExecutor PIPELINE_TASK_EXECUTOR = batchExecutionContext -> { final var allIndexMetadata = batchExecutionContext.initialState().metadata().indices().values(); - final IngestMetadata initialIngestMetadata = batchExecutionContext.initialState().metadata().custom(IngestMetadata.TYPE); + final IngestMetadata initialIngestMetadata = batchExecutionContext.initialState().metadata().section(IngestMetadata.TYPE); var currentIngestMetadata = initialIngestMetadata; for (final var taskContext : batchExecutionContext.taskContexts()) { try { @@ -158,7 +158,7 @@ public static MatcherWatchdog createGrokThreadWatchdog(Environment env, ThreadPo final var finalIngestMetadata = currentIngestMetadata; return finalIngestMetadata == initialIngestMetadata ? batchExecutionContext.initialState() - : batchExecutionContext.initialState().copyAndUpdateMetadata(b -> b.putCustom(IngestMetadata.TYPE, finalIngestMetadata)); + : batchExecutionContext.initialState().copyAndUpdateMetadata(b -> b.putSection(IngestMetadata.TYPE, finalIngestMetadata)); }; /** @@ -404,7 +404,7 @@ static void validateNotInUse(String pipeline, Collection allIndex // Returning PipelineConfiguration instead of Pipeline, because Pipeline and Processor interface don't // know how to serialize themselves. public static List getPipelines(ClusterState clusterState, String... ids) { - IngestMetadata ingestMetadata = clusterState.getMetadata().custom(IngestMetadata.TYPE); + IngestMetadata ingestMetadata = clusterState.getMetadata().section(IngestMetadata.TYPE); return innerGetPipelines(ingestMetadata, ids); } @@ -472,7 +472,7 @@ public void validatePipelineRequest(PutPipelineRequest request, NodesInfoRespons } public static boolean isNoOpPipelineUpdate(ClusterState state, PutPipelineRequest request) { - IngestMetadata currentIngestMetadata = state.metadata().custom(IngestMetadata.TYPE); + IngestMetadata currentIngestMetadata = state.metadata().section(IngestMetadata.TYPE); if (request.getVersion() == null && currentIngestMetadata != null && currentIngestMetadata.getPipelines().containsKey(request.getId())) { @@ -1184,7 +1184,7 @@ public void applyClusterState(final ClusterChangedEvent event) { // when only the part of the cluster state that a component is interested in, is updated.) 
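// ---------------------------------------------------------------------------------------------
// Illustrative aside, not part of the diff: the read/modify/publish pattern that the hunks above
// switch from custom()/putCustom() to section()/putSection(). Only Metadata#section(String),
// Metadata.Builder#putSection(String, MetadataSection) and IngestMetadata.TYPE are taken from
// this change; the wrapper class and method below are hypothetical.
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.ingest.IngestMetadata;

final class SectionReadWriteSketch {
    // Reads the ingest section (null when it has never been written) and republishes it unchanged.
    static ClusterState republishIngestSection(ClusterState state) {
        IngestMetadata ingest = state.metadata().section(IngestMetadata.TYPE);
        if (ingest == null) {
            return state; // no ingest section in the cluster state yet
        }
        return ClusterState.builder(state)
            .metadata(Metadata.builder(state.metadata()).putSection(IngestMetadata.TYPE, ingest))
            .build();
    }
}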
ingestClusterStateListeners.forEach(consumer -> consumer.accept(state)); - IngestMetadata newIngestMetadata = state.getMetadata().custom(IngestMetadata.TYPE); + IngestMetadata newIngestMetadata = state.getMetadata().section(IngestMetadata.TYPE); if (newIngestMetadata == null) { return; } diff --git a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java index 895fe65b92246..e592804cf5331 100644 --- a/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java +++ b/server/src/main/java/org/elasticsearch/persistent/AllocatedPersistentTask.java @@ -62,7 +62,7 @@ public Status getStatus() { */ public void updatePersistentTaskState( final PersistentTaskState state, - final ActionListener> listener + final ActionListener> listener ) { persistentTasksService.sendUpdateStateRequest(persistentTaskId, allocationId, state, null, listener); } @@ -99,7 +99,7 @@ public long getAllocationId() { * @param listener the callback listener */ public void waitForPersistentTask( - final Predicate> predicate, + final Predicate> predicate, final @Nullable TimeValue timeout, final PersistentTasksService.WaitForPersistentTaskListener listener ) { @@ -203,7 +203,7 @@ private void completeAndNotifyIfNeeded(@Nullable Exception failure, @Nullable St null, new ActionListener<>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask persistentTask) { logger.trace("notification for task [{}] with id [{}] was successful", getAction(), getPersistentTaskId()); } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTaskResponse.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTaskResponse.java index fca6a9b2dde7d..0cb70bf99ac1c 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTaskResponse.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTaskResponse.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import java.io.IOException; import java.util.Objects; diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java index ba7b4bb51d9c7..1916acae336f4 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksClusterService.java @@ -30,8 +30,8 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.persistent.decider.AssignmentDecision; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import 
org.elasticsearch.threadpool.ThreadPool; @@ -115,7 +115,7 @@ public void createPersistentTask( submitUnbatchedTask("create persistent task", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - PersistentTasksCustomMetadata.Builder builder = builder(currentState); + PersistentTasksMetadataSection.Builder builder = builder(currentState); if (builder.hasTask(taskId)) { throw new ResourceAlreadyExistsException("task with id {" + taskId + "} already exist"); } @@ -134,7 +134,7 @@ public void onFailure(Exception e) { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - PersistentTasksCustomMetadata tasks = newState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = newState.getMetadata().section(PersistentTasksMetadataSection.TYPE); if (tasks != null) { PersistentTask task = tasks.getTask(taskId); listener.onResponse(task); @@ -172,7 +172,7 @@ public void completePersistentTask(String id, long allocationId, Exception failu submitUnbatchedTask(source, new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - PersistentTasksCustomMetadata.Builder tasksInProgress = builder(currentState); + PersistentTasksMetadataSection.Builder tasksInProgress = builder(currentState); if (tasksInProgress.hasTask(id, allocationId)) { tasksInProgress.removeTask(id); return update(currentState, tasksInProgress); @@ -180,7 +180,7 @@ public ClusterState execute(ClusterState currentState) { if (tasksInProgress.hasTask(id)) { logger.warn( "The task [{}] with id [{}] was found but it has a different allocation id [{}], status is not updated", - PersistentTasksCustomMetadata.getTaskWithId(currentState, id).getTaskName(), + PersistentTasksMetadataSection.getTaskWithId(currentState, id).getTaskName(), id, allocationId ); @@ -199,7 +199,7 @@ public void onFailure(Exception e) { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { // Using old state since in the new state the task is already gone - listener.onResponse(PersistentTasksCustomMetadata.getTaskWithId(oldState, id)); + listener.onResponse(PersistentTasksMetadataSection.getTaskWithId(oldState, id)); } }); } @@ -214,7 +214,7 @@ public void removePersistentTask(String id, ActionListener> li submitUnbatchedTask("remove persistent task", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - PersistentTasksCustomMetadata.Builder tasksInProgress = builder(currentState); + PersistentTasksMetadataSection.Builder tasksInProgress = builder(currentState); if (tasksInProgress.hasTask(id)) { return update(currentState, tasksInProgress.removeTask(id)); } else { @@ -230,7 +230,7 @@ public void onFailure(Exception e) { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { // Using old state since in the new state the task is already gone - listener.onResponse(PersistentTasksCustomMetadata.getTaskWithId(oldState, id)); + listener.onResponse(PersistentTasksMetadataSection.getTaskWithId(oldState, id)); } }); } @@ -252,7 +252,7 @@ public void updatePersistentTaskState( submitUnbatchedTask("update task state [" + taskId + "]", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) { - PersistentTasksCustomMetadata.Builder tasksInProgress = builder(currentState); + PersistentTasksMetadataSection.Builder tasksInProgress = builder(currentState); if 
(tasksInProgress.hasTask(taskId, taskAllocationId)) { return update(currentState, tasksInProgress.updateTaskState(taskId, taskState)); } else { @@ -272,7 +272,7 @@ public void onFailure(Exception e) { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - listener.onResponse(PersistentTasksCustomMetadata.getTaskWithId(newState, taskId)); + listener.onResponse(PersistentTasksMetadataSection.getTaskWithId(newState, taskId)); } }); } @@ -297,7 +297,7 @@ public void unassignPersistentTask( submitUnbatchedTask("unassign persistent task from any node", new ClusterStateUpdateTask() { @Override public ClusterState execute(ClusterState currentState) throws Exception { - PersistentTasksCustomMetadata.Builder tasksInProgress = builder(currentState); + PersistentTasksMetadataSection.Builder tasksInProgress = builder(currentState); if (tasksInProgress.hasTask(taskId, taskAllocationId)) { logger.trace("Unassigning task {} with allocation id {}", taskId, taskAllocationId); return update(currentState, tasksInProgress.reassignTask(taskId, unassignedAssignment(reason))); @@ -313,7 +313,7 @@ public void onFailure(Exception e) { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { - listener.onResponse(PersistentTasksCustomMetadata.getTaskWithId(newState, taskId)); + listener.onResponse(PersistentTasksMetadataSection.getTaskWithId(newState, taskId)); } }); } @@ -408,7 +408,7 @@ public void onFailure(Exception e) { @Override public void clusterStateProcessed(ClusterState oldState, ClusterState newState) { reassigningTasks.set(false); - if (isAnyTaskUnassigned(newState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE))) { + if (isAnyTaskUnassigned(newState.getMetadata().section(PersistentTasksMetadataSection.TYPE))) { periodicRechecker.rescheduleIfNecessary(); } } @@ -421,7 +421,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) * persistent tasks changed. */ boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) { - final PersistentTasksCustomMetadata tasks = event.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + final PersistentTasksMetadataSection tasks = event.state().getMetadata().section(PersistentTasksMetadataSection.TYPE); if (tasks == null) { return false; } @@ -449,7 +449,7 @@ boolean shouldReassignPersistentTasks(final ClusterChangedEvent event) { /** * Returns true if any persistent task is unassigned. 
*/ - private static boolean isAnyTaskUnassigned(final PersistentTasksCustomMetadata tasks) { + private static boolean isAnyTaskUnassigned(final PersistentTasksMetadataSection tasks) { return tasks != null && tasks.tasks().stream().anyMatch(task -> task.getAssignment().isAssigned() == false); } @@ -462,7 +462,7 @@ private static boolean isAnyTaskUnassigned(final PersistentTasksCustomMetadata t ClusterState reassignTasks(final ClusterState currentState) { ClusterState clusterState = currentState; - final PersistentTasksCustomMetadata tasks = currentState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + final PersistentTasksMetadataSection tasks = currentState.getMetadata().section(PersistentTasksMetadataSection.TYPE); if (tasks != null) { logger.trace("reassigning {} persistent tasks", tasks.tasks().size()); final DiscoveryNodes nodes = currentState.nodes(); @@ -492,8 +492,8 @@ ClusterState reassignTasks(final ClusterState currentState) { /** Returns true if the persistent tasks are not equal between the previous and the current cluster state **/ static boolean persistentTasksChanged(final ClusterChangedEvent event) { - String type = PersistentTasksCustomMetadata.TYPE; - return Objects.equals(event.state().metadata().custom(type), event.previousState().metadata().custom(type)) == false; + String type = PersistentTasksMetadataSection.TYPE; + return Objects.equals(event.state().metadata().section(type), event.previousState().metadata().section(type)) == false; } /** Returns true if the task is not assigned or is assigned to a non-existing node */ @@ -501,14 +501,16 @@ public static boolean needsReassignment(final Assignment assignment, final Disco return (assignment.isAssigned() == false || nodes.nodeExists(assignment.getExecutorNode()) == false); } - private static PersistentTasksCustomMetadata.Builder builder(ClusterState currentState) { - return PersistentTasksCustomMetadata.builder(currentState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE)); + private static PersistentTasksMetadataSection.Builder builder(ClusterState currentState) { + return PersistentTasksMetadataSection.builder(currentState.getMetadata().section(PersistentTasksMetadataSection.TYPE)); } - private static ClusterState update(ClusterState currentState, PersistentTasksCustomMetadata.Builder tasksInProgress) { + private static ClusterState update(ClusterState currentState, PersistentTasksMetadataSection.Builder tasksInProgress) { if (tasksInProgress.isChanged()) { return ClusterState.builder(currentState) - .metadata(Metadata.builder(currentState.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasksInProgress.build())) + .metadata( + Metadata.builder(currentState.metadata()).putSection(PersistentTasksMetadataSection.TYPE, tasksInProgress.build()) + ) .build(); } else { return currentState; @@ -539,7 +541,7 @@ public void runInternal() { // TODO just run on the elected master? 
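// ---------------------------------------------------------------------------------------------
// Illustrative aside, not part of the diff: looking up a single persistent task after the rename.
// PersistentTasksMetadataSection.getTaskWithId and PersistentTask#isAssigned are taken from this
// change; the helper class and method name are hypothetical.
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.persistent.PersistentTasksMetadataSection;
import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask;

final class PersistentTaskLookupSketch {
    // True when the task exists in the cluster state and is currently assigned to a node.
    static boolean isTaskAssigned(ClusterState state, String taskId) {
        PersistentTask<?> task = PersistentTasksMetadataSection.getTaskWithId(state, taskId);
        return task != null && task.isAssigned();
    }
}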
final ClusterState state = clusterService.state(); logger.trace("periodic persistent task assignment check running for cluster state {}", state.getVersion()); - if (isAnyTaskUnassigned(state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE))) { + if (isAnyTaskUnassigned(state.getMetadata().section(PersistentTasksMetadataSection.TYPE))) { reassignPersistentTasks(); } } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java index 0bdd5999731dd..5fd9b3944765b 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksExecutor.java @@ -11,8 +11,8 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.core.Nullable; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.tasks.TaskId; import java.util.Collection; @@ -65,7 +65,7 @@ protected DiscoveryNode selectLeastLoadedNode( ) { long minLoad = Long.MAX_VALUE; DiscoveryNode minLoadedNode = null; - PersistentTasksCustomMetadata persistentTasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); for (DiscoveryNode node : candidateNodes) { if (selector.test(node)) { if (persistentTasks == null) { diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetadata.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksMetadataSection.java similarity index 94% rename from server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetadata.java rename to server/src/main/java/org/elasticsearch/persistent/PersistentTasksMetadataSection.java index 5fdac777b5a75..5f5c3214c427b 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksCustomMetadata.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksMetadataSection.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -52,7 +53,7 @@ /** * A cluster state record that contains a list of all running persistent tasks */ -public final class PersistentTasksCustomMetadata extends AbstractNamedDiffable implements Metadata.Custom { +public final class PersistentTasksMetadataSection extends AbstractNamedDiffable implements MetadataSection { public static final String TYPE = "persistent_tasks"; private static final String API_CONTEXT = Metadata.XContentContext.API.toString(); @@ -62,7 +63,7 @@ public final class PersistentTasksCustomMetadata extends AbstractNamedDiffable> tasks; private final long lastAllocationId; - public PersistentTasksCustomMetadata(long lastAllocationId, Map> tasks) { + public PersistentTasksMetadataSection(long lastAllocationId, Map> tasks) { 
this.lastAllocationId = lastAllocationId; this.tasks = tasks; } @@ -129,8 +130,8 @@ public PersistentTasksCustomMetadata(long lastAllocationId, Map> findTasks(String taskName, Predicate context() { return ALL_CONTEXTS; } - public static PersistentTasksCustomMetadata fromXContent(XContentParser parser) { + public static PersistentTasksMetadataSection fromXContent(XContentParser parser) { return PERSISTENT_TASKS_PARSER.apply(parser, null).build(); } @SuppressWarnings("unchecked") public static PersistentTask getTaskWithId(ClusterState clusterState, String taskId) { - PersistentTasksCustomMetadata tasks = clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterState.metadata().section(PersistentTasksMetadataSection.TYPE); if (tasks != null) { return (PersistentTask) tasks.getTask(taskId); } @@ -232,12 +233,12 @@ public static PersistentTask getTa * a copy with the modified tasks */ public static ClusterState disassociateDeadNodes(ClusterState clusterState) { - PersistentTasksCustomMetadata tasks = getPersistentTasksCustomMetadata(clusterState); + PersistentTasksMetadataSection tasks = getPersistentTasksCustomMetadata(clusterState); if (tasks == null) { return clusterState; } - PersistentTasksCustomMetadata.Builder taskBuilder = PersistentTasksCustomMetadata.builder(tasks); + PersistentTasksMetadataSection.Builder taskBuilder = PersistentTasksMetadataSection.builder(tasks); for (PersistentTask task : tasks.tasks()) { if (task.getAssignment().getExecutorNode() != null && clusterState.nodes().nodeExists(task.getAssignment().getExecutorNode()) == false) { @@ -250,7 +251,7 @@ public static ClusterState disassociateDeadNodes(ClusterState clusterState) { } Metadata.Builder metadataBuilder = Metadata.builder(clusterState.metadata()); - metadataBuilder.putCustom(TYPE, taskBuilder.build()); + metadataBuilder.putSection(TYPE, taskBuilder.build()); return ClusterState.builder(clusterState).metadata(metadataBuilder).build(); } @@ -531,7 +532,7 @@ public String getWriteableName() { return TYPE; } - public PersistentTasksCustomMetadata(StreamInput in) throws IOException { + public PersistentTasksMetadataSection(StreamInput in) throws IOException { lastAllocationId = in.readLong(); tasks = in.readMap(PersistentTask::new); } @@ -546,8 +547,8 @@ public void writeTo(StreamOutput out) throws IOException { out.writeMap(filteredTasks, StreamOutput::writeWriteable); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { - return readDiffFrom(Metadata.Custom.class, TYPE, in); + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + return readDiffFrom(MetadataSection.class, TYPE, in); } @Override @@ -562,7 +563,7 @@ public static Builder builder() { return new Builder(); } - public static Builder builder(PersistentTasksCustomMetadata tasks) { + public static Builder builder(PersistentTasksMetadataSection tasks) { return new Builder(tasks); } @@ -573,7 +574,7 @@ public static class Builder { private Builder() {} - private Builder(PersistentTasksCustomMetadata tasksInProgress) { + private Builder(PersistentTasksMetadataSection tasksInProgress) { if (tasksInProgress != null) { tasks.putAll(tasksInProgress.tasks); lastAllocationId = tasksInProgress.lastAllocationId; @@ -690,8 +691,8 @@ public boolean isChanged() { return changed; } - public PersistentTasksCustomMetadata build() { - return new PersistentTasksCustomMetadata(lastAllocationId, Collections.unmodifiableMap(tasks)); + public PersistentTasksMetadataSection 
build() { + return new PersistentTasksMetadataSection(lastAllocationId, Collections.unmodifiableMap(tasks)); } } } diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java index b1deee6b21e0e..58f089aa76c76 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksNodeService.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.gateway.GatewayService; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskAwareRequest; import org.elasticsearch.tasks.TaskId; @@ -71,8 +71,8 @@ public void clusterChanged(ClusterChangedEvent event) { // we start cancelling all local tasks before cluster has a chance to recover. return; } - PersistentTasksCustomMetadata tasks = event.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - PersistentTasksCustomMetadata previousTasks = event.previousState().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = event.state().getMetadata().section(PersistentTasksMetadataSection.TYPE); + PersistentTasksMetadataSection previousTasks = event.previousState().getMetadata().section(PersistentTasksMetadataSection.TYPE); // Cluster State Local State Local Action // STARTED NULL Create as STARTED, Start diff --git a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java index 3ea4b19f77a45..897d4c862163e 100644 --- a/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java +++ b/server/src/main/java/org/elasticsearch/persistent/PersistentTasksService.java @@ -22,7 +22,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.node.NodeClosedException; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -192,7 +192,7 @@ public void waitForPersistentTaskCondition( ClusterStateObserver.waitForState(clusterService, threadPool.getThreadContext(), new ClusterStateObserver.Listener() { @Override public void onNewClusterState(ClusterState state) { - listener.onResponse(PersistentTasksCustomMetadata.getTaskWithId(state, taskId)); + listener.onResponse(PersistentTasksMetadataSection.getTaskWithId(state, taskId)); } @Override @@ -204,7 +204,7 @@ public void onClusterServiceClose() { public void onTimeout(TimeValue timeout) { listener.onTimeout(timeout); } - }, clusterState -> predicate.test(PersistentTasksCustomMetadata.getTaskWithId(clusterState, taskId)), timeout, logger); + }, clusterState -> predicate.test(PersistentTasksMetadataSection.getTaskWithId(clusterState, taskId)), timeout, logger); } /** @@ -215,7 +215,7 @@ public void onTimeout(TimeValue timeout) { * @param listener the callback listener */ public void waitForPersistentTasksCondition( - final Predicate predicate, + final Predicate predicate, final @Nullable TimeValue timeout, 
final ActionListener listener ) { @@ -234,7 +234,7 @@ public void onClusterServiceClose() { public void onTimeout(TimeValue timeout) { listener.onFailure(new IllegalStateException("Timed out when waiting for persistent tasks after " + timeout)); } - }, clusterState -> predicate.test(clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE)), timeout, logger); + }, clusterState -> predicate.test(clusterState.metadata().section(PersistentTasksMetadataSection.TYPE)), timeout, logger); } public interface WaitForPersistentTaskListener
extends ActionListener> { diff --git a/server/src/main/java/org/elasticsearch/persistent/package-info.java b/server/src/main/java/org/elasticsearch/persistent/package-info.java index db4e3acf71f0f..41048ec0eab7e 100644 --- a/server/src/main/java/org/elasticsearch/persistent/package-info.java +++ b/server/src/main/java/org/elasticsearch/persistent/package-info.java @@ -18,14 +18,14 @@ * {@link org.elasticsearch.persistent.PersistentTasksClusterService} to update cluster state with the record about running persistent * task. *
- * 2. The master node updates the {@link org.elasticsearch.persistent.PersistentTasksCustomMetadata} in the cluster state to indicate + * 2. The master node updates the {@link org.elasticsearch.persistent.PersistentTasksMetadataSection} in the cluster state to indicate * that there is a new persistent task running in the system. *
* 3. The {@link org.elasticsearch.persistent.PersistentTasksNodeService} running on every node in the cluster monitors changes in * the cluster state and starts execution of all new tasks assigned to the node it is running on. *
* 4. If the task fails to start on the node, the {@link org.elasticsearch.persistent.PersistentTasksNodeService} uses the - * {@link org.elasticsearch.persistent.PersistentTasksCustomMetadata} to notify the + * {@link org.elasticsearch.persistent.PersistentTasksMetadataSection} to notify the * {@link org.elasticsearch.persistent.PersistentTasksService}, which reassigns the action to another node in the cluster. *
* 5. If a task finishes successfully on the node and calls listener.onResponse(), the corresponding persistent action is removed from the diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java index de4ae1051ba62..51b87f2e8903c 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesService.java @@ -330,7 +330,7 @@ public ClusterState execute(ClusterState currentState) { repositoriesMetadata.add(new RepositoryMetadata(request.name(), request.type(), request.settings())); } repositories = new RepositoriesMetadata(repositoriesMetadata); - mdBuilder.putCustom(RepositoriesMetadata.TYPE, repositories); + mdBuilder.putSection(RepositoriesMetadata.TYPE, repositories); changed = true; return ClusterState.builder(currentState).metadata(mdBuilder).build(); } @@ -435,7 +435,7 @@ public ClusterState execute(ClusterState currentState) { } else { final RepositoriesMetadata newReposMetadata = currentReposMetadata.withUuid(repositoryName, repositoryUuid); final Metadata.Builder metadata = Metadata.builder(currentState.metadata()) - .putCustom(RepositoriesMetadata.TYPE, newReposMetadata); + .putSection(RepositoriesMetadata.TYPE, newReposMetadata); return ClusterState.builder(currentState).metadata(metadata).build(); } } @@ -518,7 +518,7 @@ public ClusterState execute(ClusterState currentState) { } if (changed) { repositories = new RepositoriesMetadata(repositoriesMetadata); - mdBuilder.putCustom(RepositoriesMetadata.TYPE, repositories); + mdBuilder.putSection(RepositoriesMetadata.TYPE, repositories); return ClusterState.builder(currentState).metadata(mdBuilder).build(); } } diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index ddef1e1b808fe..0afd527d29e7f 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -2349,7 +2349,7 @@ private ClusterState getClusterStateWithUpdatedRepositoryGeneration(ClusterState return ClusterState.builder(currentState) .metadata( Metadata.builder(currentState.getMetadata()) - .putCustom( + .putSection( RepositoriesMetadata.TYPE, RepositoriesMetadata.get(currentState) .withUpdatedGeneration(repoMetadata.name(), repoData.getGenId(), repoData.getGenId()) @@ -2545,7 +2545,7 @@ public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState) .metadata( Metadata.builder(currentState.metadata()) - .putCustom( + .putSection( RepositoriesMetadata.TYPE, state.withUpdatedGeneration( metadata.name(), @@ -2720,7 +2720,7 @@ public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState) .metadata( Metadata.builder(currentState.getMetadata()) - .putCustom( + .putSection( RepositoriesMetadata.TYPE, RepositoriesMetadata.get(currentState).withUpdatedGeneration(repoName, safeGeneration, newGen) ) @@ -2864,7 +2864,7 @@ public ClusterState execute(ClusterState currentState) { : withGenerations.withUuid(metadata.name(), newRepositoryData.getUuid()); final ClusterState newClusterState = stateFilter.apply( ClusterState.builder(currentState) - .metadata(Metadata.builder(currentState.getMetadata()).putCustom(RepositoriesMetadata.TYPE, 
withUuid)) + .metadata(Metadata.builder(currentState.getMetadata()).putSection(RepositoriesMetadata.TYPE, withUuid)) .build() ); return updateRepositoryGenerationsIfNecessary(newClusterState, expectedGen, newGen); diff --git a/server/src/main/java/org/elasticsearch/script/ScriptMetadata.java b/server/src/main/java/org/elasticsearch/script/ScriptMetadata.java index bf076692133a5..1bec506c9d705 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptMetadata.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptMetadata.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.DiffableUtils; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.ParsingException; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -37,7 +38,7 @@ * {@link ScriptMetadata} is used to store user-defined scripts * as part of the {@link ClusterState} using only an id as the key. */ -public final class ScriptMetadata implements Metadata.Custom, Writeable { +public final class ScriptMetadata implements MetadataSection, Writeable { /** * Standard logger used to warn about dropped scripts. @@ -96,7 +97,7 @@ public ScriptMetadata build() { } } - static final class ScriptMetadataDiff implements NamedDiff { + static final class ScriptMetadataDiff implements NamedDiff { final Diff> pipelines; @@ -119,7 +120,7 @@ public String getWriteableName() { } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { return new ScriptMetadata(pipelines.apply(((ScriptMetadata) part).scripts)); } @@ -227,7 +228,7 @@ public static ScriptMetadata fromXContent(XContentParser parser) throws IOExcept return new ScriptMetadata(scripts); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { return new ScriptMetadataDiff(in); } @@ -271,7 +272,7 @@ public Iterator toXContentChunked(ToXContent.Params ignore } @Override - public Diff diff(Metadata.Custom before) { + public Diff diff(MetadataSection before) { return new ScriptMetadataDiff((ScriptMetadata) before, this); } diff --git a/server/src/main/java/org/elasticsearch/script/ScriptService.java b/server/src/main/java/org/elasticsearch/script/ScriptService.java index 43c7cbb78f869..de295242a5a65 100644 --- a/server/src/main/java/org/elasticsearch/script/ScriptService.java +++ b/server/src/main/java/org/elasticsearch/script/ScriptService.java @@ -656,7 +656,7 @@ Map getScriptsFromClusterState() { return Collections.emptyMap(); } - ScriptMetadata scriptMetadata = clusterState.metadata().custom(ScriptMetadata.TYPE); + ScriptMetadata scriptMetadata = clusterState.metadata().section(ScriptMetadata.TYPE); if (scriptMetadata == null) { return Collections.emptyMap(); @@ -666,7 +666,7 @@ Map getScriptsFromClusterState() { } protected StoredScriptSource getScriptFromClusterState(String id) { - ScriptMetadata scriptMetadata = clusterState.metadata().custom(ScriptMetadata.TYPE); + ScriptMetadata scriptMetadata = clusterState.metadata().section(ScriptMetadata.TYPE); if (scriptMetadata == null) { throw new ResourceNotFoundException("unable to find script [" + id + "] in cluster state"); @@ -732,9 +732,9 @@ public void putStoredScript( submitUnbatchedTask(clusterService, "put-script-" + request.id(), new AckedClusterStateUpdateTask(request, 
listener) { @Override public ClusterState execute(ClusterState currentState) { - ScriptMetadata smd = currentState.metadata().custom(ScriptMetadata.TYPE); + ScriptMetadata smd = currentState.metadata().section(ScriptMetadata.TYPE); smd = ScriptMetadata.putStoredScript(smd, request.id(), source); - Metadata.Builder mdb = Metadata.builder(currentState.getMetadata()).putCustom(ScriptMetadata.TYPE, smd); + Metadata.Builder mdb = Metadata.builder(currentState.getMetadata()).putSection(ScriptMetadata.TYPE, smd); return ClusterState.builder(currentState).metadata(mdb).build(); } @@ -749,9 +749,9 @@ public static void deleteStoredScript( submitUnbatchedTask(clusterService, "delete-script-" + request.id(), new AckedClusterStateUpdateTask(request, listener) { @Override public ClusterState execute(ClusterState currentState) { - ScriptMetadata smd = currentState.metadata().custom(ScriptMetadata.TYPE); + ScriptMetadata smd = currentState.metadata().section(ScriptMetadata.TYPE); smd = ScriptMetadata.deleteStoredScript(smd, request.id()); - Metadata.Builder mdb = Metadata.builder(currentState.getMetadata()).putCustom(ScriptMetadata.TYPE, smd); + Metadata.Builder mdb = Metadata.builder(currentState.getMetadata()).putSection(ScriptMetadata.TYPE, smd); return ClusterState.builder(currentState).metadata(mdb).build(); } @@ -768,7 +768,7 @@ private static void submitUnbatchedTask( } public static StoredScriptSource getStoredScript(ClusterState state, GetStoredScriptRequest request) { - ScriptMetadata scriptMetadata = state.metadata().custom(ScriptMetadata.TYPE); + ScriptMetadata scriptMetadata = state.metadata().section(ScriptMetadata.TYPE); if (scriptMetadata != null) { return scriptMetadata.getStoredScript(request.id()); diff --git a/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java b/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java index 9eecf6e47aa90..9ffd16a070984 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RegisteredPolicySnapshots.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -45,7 +46,7 @@ * it will not be removed from the registered set. A subsequent snapshot will then find that a registered snapshot * is no longer running and will infer that it failed, updating SnapshotLifecycleStats accordingly. 
*/ -public class RegisteredPolicySnapshots implements Metadata.Custom { +public class RegisteredPolicySnapshots implements MetadataSection { public static final String TYPE = "registered_snapshots"; private static final ParseField SNAPSHOTS = new ParseField("snapshots"); @@ -89,7 +90,7 @@ public EnumSet context() { } @Override - public Diff diff(Metadata.Custom previousState) { + public Diff diff(MetadataSection previousState) { return new RegisteredSnapshotsDiff((RegisteredPolicySnapshots) previousState, this); } @@ -142,7 +143,7 @@ public boolean equals(Object obj) { return Objects.equals(snapshots, other.snapshots); } - public static class RegisteredSnapshotsDiff implements NamedDiff { + public static class RegisteredSnapshotsDiff implements NamedDiff { final List snapshots; RegisteredSnapshotsDiff(RegisteredPolicySnapshots before, RegisteredPolicySnapshots after) { @@ -154,7 +155,7 @@ public RegisteredSnapshotsDiff(StreamInput in) throws IOException { } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { return new RegisteredPolicySnapshots(snapshots); } diff --git a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java index 25796606f2b1b..a9a96ac95bc7d 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/RestoreService.java @@ -1494,7 +1494,7 @@ private void applyGlobalStateRestore(ClusterState currentState, Metadata.Builder } // override existing restorable customs (as there might be nothing in snapshot to override them) - mdBuilder.removeCustomIf((key, value) -> value.isRestorable()); + mdBuilder.removeSectionIf((key, value) -> value.isRestorable()); // restore customs from the snapshot if (metadata.customs() != null) { @@ -1504,7 +1504,7 @@ private void applyGlobalStateRestore(ClusterState currentState, Metadata.Builder // Don't restore repositories while we are working with them // TODO: Should we restore them at the end? 
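Aside: the applyGlobalStateRestore hunk around this point swaps removeCustomIf/putCustom for removeSectionIf/putSection. A condensed sketch of that two-step flow, using only the builder methods visible in this patch; the snapshotSections map is a stand-in for whatever global metadata the snapshot actually carries.

    static Metadata restoreGlobalSections(Metadata currentMetadata, Map<String, MetadataSection> snapshotSections) {
        Metadata.Builder mdBuilder = Metadata.builder(currentMetadata);
        // drop every restorable section first, so sections absent from the snapshot do not survive the restore
        mdBuilder.removeSectionIf((key, section) -> section.isRestorable());
        // then copy the snapshot's sections back in
        snapshotSections.forEach(mdBuilder::putSection);
        return mdBuilder.build();
    }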
// Also, don't restore data streams here, we already added them to the metadata builder above - mdBuilder.putCustom(entry.getKey(), entry.getValue()); + mdBuilder.putSection(entry.getKey(), entry.getValue()); } } } diff --git a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java index 6d7404d7472e5..699c06638dc94 100644 --- a/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java +++ b/server/src/main/java/org/elasticsearch/snapshots/SnapshotsService.java @@ -3955,7 +3955,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionCo final SnapshotsInProgress initialSnapshots = SnapshotsInProgress.get(state); SnapshotsInProgress snapshotsInProgress = shardsUpdateContext.computeUpdatedState(); final RegisteredPolicySnapshots.Builder registeredPolicySnapshots = state.metadata() - .custom(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY) + .section(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY) .builder(); for (final var taskContext : batchExecutionContext.taskContexts()) { if (taskContext.getTask() instanceof CreateSnapshotTask task) { @@ -3988,7 +3988,7 @@ public ClusterState execute(BatchExecutionContext batchExecutionCo return ClusterState.builder(state) .putCustom(SnapshotsInProgress.TYPE, snapshotsInProgress) - .metadata(Metadata.builder(state.metadata()).putCustom(RegisteredPolicySnapshots.TYPE, registeredPolicySnapshots.build())) + .metadata(Metadata.builder(state.metadata()).putSection(RegisteredPolicySnapshots.TYPE, registeredPolicySnapshots.build())) .build(); } diff --git a/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java b/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java index 04a0b3434814a..335375b056d64 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java +++ b/server/src/main/java/org/elasticsearch/upgrades/FeatureMigrationResults.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; @@ -34,7 +35,7 @@ * Holds the results of the most recent attempt to migrate system indices. Updated by {@link SystemIndexMigrator} as it finishes each * feature, or fails. 
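Aside: FeatureMigrationResults below, like ScriptMetadata and RegisteredPolicySnapshots above, carries a NamedDiff over MetadataSection. Schematically, the round trip those diff classes implement looks like this sketch; before and after stand for any two instances of one such section type.

    static void assertDiffRoundTrip(MetadataSection before, MetadataSection after) {
        Diff<MetadataSection> diff = after.diff(before);  // e.g. FeatureMigrationResults.ResultsDiff
        MetadataSection rebuilt = diff.apply(before);     // NamedDiff apply(MetadataSection part)
        assert rebuilt.equals(after);                     // applying the diff reproduces the newer section
    }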
*/ -public class FeatureMigrationResults implements Metadata.Custom { +public class FeatureMigrationResults implements MetadataSection { public static final String TYPE = "system_index_migration"; public static final TransportVersion MIGRATION_ADDED_VERSION = TransportVersions.V_8_0_0; @@ -117,15 +118,15 @@ public int hashCode() { } @Override - public Diff diff(Metadata.Custom previousState) { + public Diff diff(MetadataSection previousState) { return new ResultsDiff((FeatureMigrationResults) previousState, this); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { return new ResultsDiff(in); } - public static class ResultsDiff implements NamedDiff { + public static class ResultsDiff implements NamedDiff { private final Diff> resultsDiff; public ResultsDiff(FeatureMigrationResults before, FeatureMigrationResults after) { @@ -142,7 +143,7 @@ public ResultsDiff(StreamInput in) throws IOException { } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { TreeMap newResults = new TreeMap<>( resultsDiff.apply(((FeatureMigrationResults) part).featureStatuses) ); diff --git a/server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java b/server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java index 7db4c7b8da651..925dc6f21aa65 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java +++ b/server/src/main/java/org/elasticsearch/upgrades/MigrationResultsUpdateTask.java @@ -71,12 +71,12 @@ private static void submitUnbatchedTask( @Override public ClusterState execute(ClusterState currentState) throws Exception { - FeatureMigrationResults currentResults = currentState.metadata().custom(FeatureMigrationResults.TYPE); + FeatureMigrationResults currentResults = currentState.metadata().section(FeatureMigrationResults.TYPE); if (currentResults == null) { currentResults = new FeatureMigrationResults(new HashMap<>()); } FeatureMigrationResults newResults = currentResults.withResult(featureName, status); - final Metadata newMetadata = Metadata.builder(currentState.metadata()).putCustom(FeatureMigrationResults.TYPE, newResults).build(); + final Metadata newMetadata = Metadata.builder(currentState.metadata()).putSection(FeatureMigrationResults.TYPE, newResults).build(); final ClusterState newState = ClusterState.builder(currentState).metadata(newMetadata).build(); return newState; } diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationExecutor.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationExecutor.java index 24d30963d18d8..b0f1356e11cc4 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationExecutor.java +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrationExecutor.java @@ -20,8 +20,8 @@ import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xcontent.NamedXContentRegistry; import org.elasticsearch.xcontent.ParseField; @@ -73,7 +73,7 @@ protected AllocatedPersistentTask createTask( String 
type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask taskInProgress, + PersistentTasksMetadataSection.PersistentTask taskInProgress, Map headers ) { return new SystemIndexMigrator( @@ -93,7 +93,7 @@ protected AllocatedPersistentTask createTask( } @Override - public PersistentTasksCustomMetadata.Assignment getAssignment( + public PersistentTasksMetadataSection.Assignment getAssignment( SystemIndexMigrationTaskParams params, Collection candidateNodes, ClusterState clusterState @@ -106,7 +106,7 @@ public PersistentTasksCustomMetadata.Assignment getAssignment( if (discoveryNode == null) { return NO_NODE_FOUND; } else { - return new PersistentTasksCustomMetadata.Assignment(discoveryNode.getId(), ""); + return new PersistentTasksMetadataSection.Assignment(discoveryNode.getId(), ""); } } diff --git a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java index 3dcdf558e6dfc..3eec06dacb84b 100644 --- a/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java +++ b/server/src/main/java/org/elasticsearch/upgrades/SystemIndexMigrator.java @@ -603,9 +603,9 @@ private static void clearResults(ClusterService clusterService, ActionListenerbuilder() diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java index 4b96e7d447475..1206904571934 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterChangedEventTests.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.metadata.IndexGraveyard; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; @@ -25,7 +26,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.TestCustomMetadata; +import org.elasticsearch.test.TestMetadataSection; import java.util.ArrayList; import java.util.Arrays; @@ -225,7 +226,7 @@ public void testChangedCustomMetadataSet() { final int numNodesInCluster = 3; final ClusterState originalState = createState(numNodesInCluster, randomBoolean(), initialIndices); - CustomMetadata1 customMetadata1 = new CustomMetadata1("data"); + MetadataSection1 customMetadata1 = new MetadataSection1("data"); final ClusterState stateWithCustomMetadata = nextState(originalState, Collections.singletonList(customMetadata1)); // no custom metadata present in any state @@ -247,7 +248,7 @@ public void testChangedCustomMetadataSet() { assertTrue(changedCustomMetadataTypeSet.isEmpty()); // next state has equivalent custom metadata - nextState = nextState(originalState, Collections.singletonList(new CustomMetadata1("data"))); + nextState = nextState(originalState, Collections.singletonList(new MetadataSection1("data"))); event = new ClusterChangedEvent("_na_", stateWithCustomMetadata, nextState); changedCustomMetadataTypeSet = event.changedCustomMetadataSet(); assertTrue(changedCustomMetadataTypeSet.isEmpty()); @@ -260,14 +261,14 @@ public void testChangedCustomMetadataSet() { 
assertTrue(changedCustomMetadataTypeSet.contains(customMetadata1.getWriteableName())); // next state updates custom metadata - nextState = nextState(stateWithCustomMetadata, Collections.singletonList(new CustomMetadata1("data1"))); + nextState = nextState(stateWithCustomMetadata, Collections.singletonList(new MetadataSection1("data1"))); event = new ClusterChangedEvent("_na_", stateWithCustomMetadata, nextState); changedCustomMetadataTypeSet = event.changedCustomMetadataSet(); assertTrue(changedCustomMetadataTypeSet.size() == 1); assertTrue(changedCustomMetadataTypeSet.contains(customMetadata1.getWriteableName())); // next state adds new custom metadata type - CustomMetadata2 customMetadata2 = new CustomMetadata2("data2"); + MetadataSection2 customMetadata2 = new MetadataSection2("data2"); nextState = nextState(stateWithCustomMetadata, Arrays.asList(customMetadata1, customMetadata2)); event = new ClusterChangedEvent("_na_", stateWithCustomMetadata, nextState); changedCustomMetadataTypeSet = event.changedCustomMetadataSet(); @@ -291,8 +292,8 @@ public void testChangedCustomMetadataSet() { assertTrue(changedCustomMetadataTypeSet.contains(customMetadata1.getWriteableName())); } - private static class CustomMetadata2 extends TestCustomMetadata { - protected CustomMetadata2(String data) { + private static class MetadataSection2 extends TestMetadataSection { + protected MetadataSection2(String data) { super(data); } @@ -312,8 +313,8 @@ public EnumSet context() { } } - private static class CustomMetadata1 extends TestCustomMetadata { - protected CustomMetadata1(String data) { + private static class MetadataSection1 extends TestMetadataSection { + protected MetadataSection1(String data) { super(data); } @@ -355,17 +356,17 @@ private static ClusterState createNonInitializedState(final int numNodes, final .build(); } - private static ClusterState nextState(final ClusterState previousState, List customMetadataList) { + private static ClusterState nextState(final ClusterState previousState, List customMetadataList) { final ClusterState.Builder builder = ClusterState.builder(previousState); builder.stateUUID(UUIDs.randomBase64UUID()); Metadata.Builder metadataBuilder = Metadata.builder(previousState.metadata()); - for (Map.Entry customMetadata : previousState.metadata().customs().entrySet()) { - if (customMetadata.getValue() instanceof TestCustomMetadata) { - metadataBuilder.removeCustom(customMetadata.getKey()); + for (Map.Entry customMetadata : previousState.metadata().customs().entrySet()) { + if (customMetadata.getValue() instanceof TestMetadataSection) { + metadataBuilder.removeSection(customMetadata.getKey()); } } - for (TestCustomMetadata testCustomMetadata : customMetadataList) { - metadataBuilder.putCustom(testCustomMetadata.getWriteableName(), testCustomMetadata); + for (TestMetadataSection testCustomMetadata : customMetadataList) { + metadataBuilder.putSection(testCustomMetadata.getWriteableName(), testCustomMetadata); } builder.metadata(metadataBuilder); return builder.build(); diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java index 55cd6e5790f84..6599d025b93e0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterSnapshotStatsTests.java @@ -383,7 +383,7 @@ public void testComputation() { ClusterState.builder(ClusterState.EMPTY_STATE) .metadata( Metadata.builder() - 
.putCustom( + .putSection( RepositoriesMetadata.TYPE, new RepositoriesMetadata(List.of(new RepositoryMetadata("test-repo", "test-repo-type", Settings.EMPTY))) ) diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java index b57badb3a180f..57426d05b4ab0 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/CoordinatorTests.java @@ -1575,7 +1575,7 @@ public void testSingleNodeDiscoveryStabilisesEvenWhenDisrupted() { } } - private static class BrokenCustom implements SimpleDiffable, ClusterState.Custom { + private static class BrokenSection implements SimpleDiffable, ClusterState.Custom { static final String EXCEPTION_MESSAGE = "simulated"; @@ -1611,11 +1611,15 @@ public void testClusterRecoversAfterExceptionDuringSerialization() { logger.info("--> submitting broken task to [{}]", leader1); final AtomicBoolean failed = new AtomicBoolean(); - leader1.submitUpdateTask("broken-task", cs -> ClusterState.builder(cs).putCustom("broken", new BrokenCustom()).build(), (e) -> { - assertThat(e.getCause(), instanceOf(ElasticsearchException.class)); - assertThat(e.getCause().getMessage(), equalTo(BrokenCustom.EXCEPTION_MESSAGE)); - failed.set(true); - }); + leader1.submitUpdateTask( + "broken-task", + cs -> ClusterState.builder(cs).putCustom("broken", new BrokenSection()).build(), + (e) -> { + assertThat(e.getCause(), instanceOf(ElasticsearchException.class)); + assertThat(e.getCause().getMessage(), equalTo(BrokenSection.EXCEPTION_MESSAGE)); + failed.set(true); + } + ); // allow for forking 3 times: // - once onto the master-service thread // - once to fork the publication in FakeThreadPoolMasterService diff --git a/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandTests.java b/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandTests.java index b0aef332717f6..7dcbeee19f70c 100644 --- a/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/coordination/ElasticsearchNodeCommandTests.java @@ -73,8 +73,8 @@ private void runLoadStateTest(boolean hasMissingCustoms, boolean preserveUnknown // make sure the index tombstones are the same too if (hasMissingCustoms) { - assertNotNull(loadedMetadata.custom(IndexGraveyard.TYPE)); - assertThat(loadedMetadata.custom(IndexGraveyard.TYPE), instanceOf(ElasticsearchNodeCommand.UnknownMetadataCustom.class)); + assertNotNull(loadedMetadata.section(IndexGraveyard.TYPE)); + assertThat(loadedMetadata.section(IndexGraveyard.TYPE), instanceOf(ElasticsearchNodeCommand.UnknownMetadataSection.class)); if (preserveUnknownCustoms) { // check that we reserialize unknown metadata correctly again diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadataSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadataSerializationTests.java index 717e74f2878c0..a7a17467fe232 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadataSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesMetadataSerializationTests.java @@ -20,9 +20,9 @@ import static org.elasticsearch.cluster.metadata.DesiredNodesSerializationTests.mutateDesiredNodes; import static 
org.elasticsearch.cluster.metadata.DesiredNodesTestCase.randomDesiredNodes; -public class DesiredNodesMetadataSerializationTests extends ChunkedToXContentDiffableSerializationTestCase { +public class DesiredNodesMetadataSerializationTests extends ChunkedToXContentDiffableSerializationTestCase { @Override - protected Metadata.Custom makeTestChanges(Metadata.Custom testInstance) { + protected MetadataSection makeTestChanges(MetadataSection testInstance) { if (randomBoolean()) { return testInstance; } @@ -30,17 +30,17 @@ protected Metadata.Custom makeTestChanges(Metadata.Custom testInstance) { } @Override - protected Writeable.Reader> diffReader() { + protected Writeable.Reader> diffReader() { return DesiredNodesMetadata::readDiffFrom; } @Override - protected Metadata.Custom doParseInstance(XContentParser parser) throws IOException { + protected MetadataSection doParseInstance(XContentParser parser) throws IOException { return DesiredNodesMetadata.fromXContent(parser); } @Override - protected Writeable.Reader instanceReader() { + protected Writeable.Reader instanceReader() { return DesiredNodesMetadata::new; } @@ -48,18 +48,18 @@ protected Writeable.Reader instanceReader() { protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( Collections.singletonList( - new NamedWriteableRegistry.Entry(Metadata.Custom.class, DesiredNodesMetadata.TYPE, DesiredNodesMetadata::new) + new NamedWriteableRegistry.Entry(MetadataSection.class, DesiredNodesMetadata.TYPE, DesiredNodesMetadata::new) ) ); } @Override - protected Metadata.Custom createTestInstance() { + protected MetadataSection createTestInstance() { return randomDesiredNodesMetadata(); } @Override - protected Metadata.Custom mutateInstance(Metadata.Custom instance) { + protected MetadataSection mutateInstance(MetadataSection instance) { return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesTestCase.java b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesTestCase.java index d99d787a1d243..cc2a41d204e2a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesTestCase.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/DesiredNodesTestCase.java @@ -162,7 +162,7 @@ public static ClusterState createClusterStateWithDiscoveryNodesAndDesiredNodes( return ClusterState.builder(ClusterName.DEFAULT) .nodes(discoveryNodes) - .metadata(Metadata.builder().putCustom(DesiredNodesMetadata.TYPE, new DesiredNodesMetadata(desiredNodes))) + .metadata(Metadata.builder().putSection(DesiredNodesMetadata.TYPE, new DesiredNodesMetadata(desiredNodes))) .build(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java index 99f78f95dd36c..4323cd2811a78 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/IndexMetadataVerifierTests.java @@ -87,7 +87,7 @@ public void testDeleteBrokenSystemIndexSettings() { assertSame(indexMetadata, src); } - public void testCustomSimilarity() { + public void testSectionSimilarity() { IndexMetadataVerifier service = getIndexMetadataVerifier(); IndexMetadata src = newIndexMeta( "foo", diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java 
b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java index f5daac8ecd090..7b309a7920cdb 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataIndexTemplateServiceTests.java @@ -1022,7 +1022,7 @@ public void testFindV2InvalidGlobalTemplate() { .version(1L) .build(); Metadata invalidGlobalTemplateMetadata = Metadata.builder() - .putCustom( + .putSection( ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata(Map.of("invalid_global_template", invalidGlobalTemplate)) ) diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java index 955d7d2de6882..c860d6afcb794 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/MetadataTests.java @@ -41,7 +41,7 @@ import org.elasticsearch.index.alias.RandomAliasActionsGenerator; import org.elasticsearch.index.mapper.MapperService; import org.elasticsearch.ingest.IngestMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.plugins.FieldPredicate; import org.elasticsearch.plugins.MapperPlugin; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; @@ -1052,18 +1052,18 @@ public void testTransientSettingsOverridePersistentSettings() { assertThat(setting.get(metadata.settings()), equalTo("transient-value")); } - public void testBuilderRejectsNullCustom() { + public void testBuilderRejectsNullSection() { final Metadata.Builder builder = Metadata.builder(); final String key = randomAlphaOfLength(10); - assertThat(expectThrows(NullPointerException.class, () -> builder.putCustom(key, null)).getMessage(), containsString(key)); + assertThat(expectThrows(NullPointerException.class, () -> builder.putSection(key, null)).getMessage(), containsString(key)); } public void testBuilderRejectsNullInCustoms() { final Metadata.Builder builder = Metadata.builder(); final String key = randomAlphaOfLength(10); - final Map map = new HashMap<>(); + final Map map = new HashMap<>(); map.put(key, null); - assertThat(expectThrows(NullPointerException.class, () -> builder.customs(map)).getMessage(), containsString(key)); + assertThat(expectThrows(NullPointerException.class, () -> builder.sections(map)).getMessage(), containsString(key)); } public void testCopyAndUpdate() throws IOException { @@ -1076,18 +1076,18 @@ public void testCopyAndUpdate() throws IOException { assertThat(copy.clusterUUID(), equalTo(newClusterUuid)); } - public void testBuilderRemoveCustomIf() { - var custom1 = new TestCustomMetadata(); - var custom2 = new TestCustomMetadata(); + public void testBuilderRemoveSectionIf() { + var custom1 = new TestMetadataSection(); + var custom2 = new TestMetadataSection(); var builder = Metadata.builder(); - builder.putCustom("custom1", custom1); - builder.putCustom("custom2", custom2); + builder.putSection("custom1", custom1); + builder.putSection("custom2", custom2); - builder.removeCustomIf((key, value) -> Objects.equals(key, "custom1")); + builder.removeSectionIf((key, value) -> Objects.equals(key, "custom1")); var metadata = builder.build(); - assertThat(metadata.custom("custom1"), nullValue()); - assertThat(metadata.custom("custom2"), sameInstance(custom2)); + 
assertThat(metadata.section("custom1"), nullValue()); + assertThat(metadata.section("custom2"), sameInstance(custom2)); } public void testBuilderRejectsDataStreamThatConflictsWithIndex() { @@ -2272,33 +2272,33 @@ public static int expectedChunkCount(ToXContent.Params params, Metadata metadata // 1 chunk for each index + 2 to wrap the indices field chunkCount += 2 + metadata.indices().size(); - for (Metadata.Custom custom : metadata.customs().values()) { + for (MetadataSection section : metadata.customs().values()) { chunkCount += 2; - if (custom instanceof ComponentTemplateMetadata componentTemplateMetadata) { + if (section instanceof ComponentTemplateMetadata componentTemplateMetadata) { chunkCount += 2 + componentTemplateMetadata.componentTemplates().size(); - } else if (custom instanceof ComposableIndexTemplateMetadata composableIndexTemplateMetadata) { + } else if (section instanceof ComposableIndexTemplateMetadata composableIndexTemplateMetadata) { chunkCount += 2 + composableIndexTemplateMetadata.indexTemplates().size(); - } else if (custom instanceof DataStreamMetadata dataStreamMetadata) { + } else if (section instanceof DataStreamMetadata dataStreamMetadata) { chunkCount += 4 + dataStreamMetadata.dataStreams().size() + dataStreamMetadata.getDataStreamAliases().size(); - } else if (custom instanceof DesiredNodesMetadata) { + } else if (section instanceof DesiredNodesMetadata) { chunkCount += 1; - } else if (custom instanceof FeatureMigrationResults featureMigrationResults) { + } else if (section instanceof FeatureMigrationResults featureMigrationResults) { chunkCount += 2 + featureMigrationResults.getFeatureStatuses().size(); - } else if (custom instanceof IndexGraveyard indexGraveyard) { + } else if (section instanceof IndexGraveyard indexGraveyard) { chunkCount += 2 + indexGraveyard.getTombstones().size(); - } else if (custom instanceof IngestMetadata ingestMetadata) { + } else if (section instanceof IngestMetadata ingestMetadata) { chunkCount += 2 + ingestMetadata.getPipelines().size(); - } else if (custom instanceof NodesShutdownMetadata nodesShutdownMetadata) { + } else if (section instanceof NodesShutdownMetadata nodesShutdownMetadata) { chunkCount += 2 + nodesShutdownMetadata.getAll().size(); - } else if (custom instanceof PersistentTasksCustomMetadata persistentTasksCustomMetadata) { + } else if (section instanceof PersistentTasksMetadataSection persistentTasksCustomMetadata) { chunkCount += 3 + persistentTasksCustomMetadata.tasks().size(); - } else if (custom instanceof RepositoriesMetadata repositoriesMetadata) { + } else if (section instanceof RepositoriesMetadata repositoriesMetadata) { chunkCount += repositoriesMetadata.repositories().size(); } else { // could be anything, we have to just try it chunkCount += Iterables.size( - (Iterable) (() -> Iterators.map(custom.toXContentChunked(params), Function.identity())) + (Iterable) (() -> Iterators.map(section.toXContentChunked(params), Function.identity())) ); } } @@ -2327,7 +2327,7 @@ public void testEnsureMetadataFieldCheckedForGlobalStateChanges() { "templates", "clusterUUID", "clusterUUIDCommitted", - "customs", + "sections", "reservedStateMetadata" ); Set excludedFromGlobalStateCheck = Set.of( @@ -2470,7 +2470,7 @@ private static class CreateIndexResult { } } - private static class TestCustomMetadata implements Metadata.Custom { + private static class TestMetadataSection implements MetadataSection { @Override public Iterator toXContentChunked(ToXContent.Params params) { @@ -2478,7 +2478,7 @@ public Iterator 
toXContentChunked(ToXContent.Params params } @Override - public Diff diff(Metadata.Custom previousState) { + public Diff diff(MetadataSection previousState) { return null; } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java index 881e84f07af9a..56058335b3f40 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/NodesShutdownMetadataTests.java @@ -38,7 +38,7 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.nullValue; -public class NodesShutdownMetadataTests extends ChunkedToXContentDiffableSerializationTestCase { +public class NodesShutdownMetadataTests extends ChunkedToXContentDiffableSerializationTestCase { public void testInsertNewNodeShutdownMetadata() { NodesShutdownMetadata nodesShutdownMetadata = new NodesShutdownMetadata(new HashMap<>()); @@ -93,7 +93,7 @@ public void testIsNodeShuttingDown() { ClusterState state = ClusterState.builder(ClusterName.DEFAULT).nodes(nodes).build(); state = ClusterState.builder(state) - .metadata(Metadata.builder(state.metadata()).putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata).build()) + .metadata(Metadata.builder(state.metadata()).putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata).build()) .nodes(DiscoveryNodes.builder(state.nodes()).add(DiscoveryNodeUtils.create("_node_1")).build()) .build(); @@ -156,7 +156,7 @@ public void testIsNodeMarkedForRemoval() { } @Override - protected Writeable.Reader> diffReader() { + protected Writeable.Reader> diffReader() { return NodesShutdownMetadata.NodeShutdownMetadataDiff::new; } @@ -166,7 +166,7 @@ protected NodesShutdownMetadata doParseInstance(XContentParser parser) throws IO } @Override - protected Writeable.Reader instanceReader() { + protected Writeable.Reader instanceReader() { return NodesShutdownMetadata::new; } @@ -195,12 +195,12 @@ private SingleNodeShutdownMetadata randomNodeShutdownInfo() { } @Override - protected Metadata.Custom makeTestChanges(Metadata.Custom testInstance) { + protected MetadataSection makeTestChanges(MetadataSection testInstance) { return randomValueOtherThan(testInstance, this::createTestInstance); } @Override - protected Metadata.Custom mutateInstance(Metadata.Custom instance) { + protected MetadataSection mutateInstance(MetadataSection instance) { return makeTestChanges(instance); } } diff --git a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java index 517d014c12723..4bf7ab1431fa7 100644 --- a/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/metadata/ToAndFromJsonMetadataTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.xcontent.ChunkedToXContent; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.TestCustomMetadata; +import org.elasticsearch.test.TestMetadataSection; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; import org.elasticsearch.xcontent.json.JsonXContent; @@ -876,10 +876,10 @@ private Metadata buildMetadata() throws IOException { .build(); } - public static class CustomMetadata extends TestCustomMetadata { + public static class 
MetadataSection extends TestMetadataSection { public static final String TYPE = "custom_md"; - CustomMetadata(String data) { + MetadataSection(String data) { super(data); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java index 3a5aab9e80133..059721e5c8a9d 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/DiskThresholdMonitorTests.java @@ -901,7 +901,7 @@ protected void updateIndicesReadOnly(Set indicesToUpdate, Releasable onC .metadata( Metadata.builder(clusterState.metadata()) .put(indexMetadata, true) - .putCustom( + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Collections.singletonMap( @@ -982,7 +982,7 @@ protected void updateIndicesReadOnly(Set indicesToUpdate, Releasable onC assertNull(indicesToRelease.get()); final ClusterState clusterStateNoShutdown = ClusterState.builder(clusterState) - .metadata(Metadata.builder(clusterState.metadata()).put(indexMetadata, true).removeCustom(NodesShutdownMetadata.TYPE).build()) + .metadata(Metadata.builder(clusterState.metadata()).put(indexMetadata, true).removeSection(NodesShutdownMetadata.TYPE).build()) .blocks(ClusterBlocks.builder().addBlocks(indexMetadata).build()) .build(); @@ -1285,7 +1285,7 @@ public void testSkipDiskThresholdMonitorWhenStateNotRecovered() { .put(IndexMetadata.builder("test").settings(settings(IndexVersion.current())).numberOfShards(1).numberOfReplicas(1)); boolean shutdownMetadataInState = randomBoolean(); if (shutdownMetadataInState) { - metadataBuilder.putCustom( + metadataBuilder.putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Collections.singletonMap( diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java index 615405645d6b2..c614745e04a6a 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceReconcilerTests.java @@ -914,7 +914,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingAllocation allocat // filter forbids this final var shuttingDownState = allocationService.reroute( clusterState.copyAndUpdateMetadata( - tmpMetadata -> tmpMetadata.putCustom( + tmpMetadata -> tmpMetadata.putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Map.of( diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java index 130d2e41aa374..c57268dee6608 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/allocator/DesiredBalanceShardsAllocatorTests.java @@ -707,7 +707,7 @@ public void resetDesiredBalance() { final var nodeShutdownMetadata = new NodesShutdownMetadata(Map.of(node2.getId(), singleShutdownMetadataBuilder.build())); // Add shutdown marker clusterState = ClusterState.builder(clusterState) - 
.metadata(Metadata.builder(clusterState.metadata()).putCustom(NodesShutdownMetadata.TYPE, nodeShutdownMetadata)) + .metadata(Metadata.builder(clusterState.metadata()).putSection(NodesShutdownMetadata.TYPE, nodeShutdownMetadata)) .build(); assertTrue(desiredBalanceAllocator.getProcessedNodeShutdowns().isEmpty()); rerouteAndWait(service, clusterState, "reroute-after-shutdown"); @@ -729,7 +729,7 @@ public void resetDesiredBalance() { assertFalse("desired balance reset should not be called again for processed shutdowns", resetCalled.get()); // Remove the shutdown marker clusterState = ClusterState.builder(clusterState) - .metadata(Metadata.builder(clusterState.metadata()).putCustom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY)) + .metadata(Metadata.builder(clusterState.metadata()).putSection(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY)) .build(); rerouteAndWait(service, clusterState, "random-reroute"); if (removeNodeFromCluster) { diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java index 9256b5b5caadf..a936fce56d992 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeReplacementAllocationDeciderTests.java @@ -195,7 +195,7 @@ public void testShouldNotAutoExpandReplicasDuringUnrelatedNodeReplacement() { .metadata( Metadata.builder() .put(IndexMetadata.builder(indexMetadata)) - .putCustom(NodesShutdownMetadata.TYPE, createNodeShutdownReplacementMetadata(NODE_A.getId(), NODE_B.getName())) + .putSection(NodesShutdownMetadata.TYPE, createNodeShutdownReplacementMetadata(NODE_A.getId(), NODE_B.getName())) ) .routingTable( RoutingTable.builder() @@ -305,7 +305,7 @@ public void testShouldNotContractAutoExpandReplicasDuringNodeReplacement() { state = ClusterState.builder(state) .metadata( Metadata.builder(state.metadata()) - .putCustom(NodesShutdownMetadata.TYPE, createNodeShutdownReplacementMetadata(NODE_A.getId(), NODE_B.getName())) + .putSection(NodesShutdownMetadata.TYPE, createNodeShutdownReplacementMetadata(NODE_A.getId(), NODE_B.getName())) .build() ) .build(); @@ -410,7 +410,7 @@ private ClusterState prepareState(String sourceNodeId, String targetNodeName) { .metadata( Metadata.builder() .put(IndexMetadata.builder(indexMetadata)) - .putCustom(NodesShutdownMetadata.TYPE, createNodeShutdownReplacementMetadata(sourceNodeId, targetNodeName)) + .putSection(NodesShutdownMetadata.TYPE, createNodeShutdownReplacementMetadata(sourceNodeId, targetNodeName)) ) .build(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java index c36e3da7600df..00589127ee895 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/NodeShutdownAllocationDeciderTests.java @@ -181,7 +181,7 @@ public void testAutoExpandDuringNodeReplacement() { // should auto-expand to source when shutdown/replacement entry is registered and node replacement has not started var shutdown = 
createNodesShutdownMetadata(SingleNodeShutdownMetadata.Type.REPLACE, DATA_NODE.getId()); state = ClusterState.builder(state) - .metadata(Metadata.builder(state.metadata()).putCustom(NodesShutdownMetadata.TYPE, shutdown).build()) + .metadata(Metadata.builder(state.metadata()).putSection(NodesShutdownMetadata.TYPE, shutdown).build()) .build(); assertThatDecision( decider.shouldAutoExpandToNode(indexMetadata, DATA_NODE, createRoutingAllocation(state)), @@ -216,7 +216,7 @@ private ClusterState prepareState(SingleNodeShutdownMetadata.Type shutdownType, .metadata( Metadata.builder() .put(IndexMetadata.builder(indexMetadata)) - .putCustom(NodesShutdownMetadata.TYPE, createNodesShutdownMetadata(shutdownType, nodeId)) + .putSection(NodesShutdownMetadata.TYPE, createNodesShutdownMetadata(shutdownType, nodeId)) ) .build(); } diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java index b9bf565ee58cd..7040ddb8592ff 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/decider/SnapshotInProgressAllocationDeciderTests.java @@ -208,7 +208,7 @@ public void testYesWhenSnapshotInProgressButShardIsPausedDueToShutdown() { final var clusterStateWithShutdownMetadata = SnapshotsInProgressSerializationTests.CLUSTER_STATE_FOR_NODE_SHUTDOWNS .copyAndUpdateMetadata( - mdb -> mdb.putCustom( + mdb -> mdb.putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Map.of( diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java index 0e3041dda9853..eeaa86eb3e740 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/shards/ShardsAvailabilityHealthIndicatorServiceTests.java @@ -2153,7 +2153,7 @@ private static ClusterState createClusterStateWith( indexMetadataMap.put(indexMetadata.getIndex().getName(), indexMetadata); } metadataBuilder.indices(indexMetadataMap); - metadataBuilder.putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata); + metadataBuilder.putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata); DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder(); nodes.forEach(discoveryNodesBuilder::add); diff --git a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java index 1bae3ca59f3d9..4f7bfb31342fd 100644 --- a/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/serialization/ClusterSerializationTests.java @@ -378,16 +378,16 @@ public void testObjectReuseWhenApplyingClusterStateDiff() throws Exception { ); } - public static class TestCustomOne extends AbstractNamedDiffable implements Custom { + public static class TestSectionOne extends AbstractNamedDiffable implements Custom { public static final String TYPE = "test_custom_one"; private final String strObject; 
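Aside: TestSectionOne and TestSectionTwo implement ClusterState.Custom, which this patch leaves untouched; only Metadata-level customs become sections. A small sketch of the resulting split, with hypothetical keys and instances purely for illustration.

    static ClusterState tagBoth(ClusterState state, ClusterState.Custom clusterLevel, MetadataSection metadataLevel) {
        return ClusterState.builder(state)
            .putCustom("cluster_level", clusterLevel)  // ClusterState-level customs keep the old method names
            .metadata(Metadata.builder(state.metadata()).putSection("metadata_level", metadataLevel))  // renamed here
            .build();
    }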
- public TestCustomOne(String strObject) { + public TestSectionOne(String strObject) { this.strObject = strObject; } - public TestCustomOne(StreamInput in) throws IOException { + public TestSectionOne(StreamInput in) throws IOException { this.strObject = in.readString(); } @@ -421,16 +421,16 @@ public TransportVersion getMinimalSupportedVersion() { } - public static class TestCustomTwo extends AbstractNamedDiffable implements Custom { + public static class TestSectionTwo extends AbstractNamedDiffable implements Custom { public static final String TYPE = "test_custom_two"; private final Integer intObject; - public TestCustomTwo(Integer intObject) { + public TestSectionTwo(Integer intObject) { this.intObject = intObject; } - public TestCustomTwo(StreamInput in) throws IOException { + public TestSectionTwo(StreamInput in) throws IOException { this.intObject = in.readInt(); } @@ -466,8 +466,8 @@ public TransportVersion getMinimalSupportedVersion() { public void testCustomSerialization() throws Exception { ClusterState.Builder builder = ClusterState.builder(ClusterState.EMPTY_STATE) - .putCustom(TestCustomOne.TYPE, new TestCustomOne("test_custom_one")) - .putCustom(TestCustomTwo.TYPE, new TestCustomTwo(10)); + .putCustom(TestSectionOne.TYPE, new TestSectionOne("test_custom_one")) + .putCustom(TestSectionTwo.TYPE, new TestSectionTwo(10)); ClusterState clusterState = builder.incrementVersion().build(); @@ -475,10 +475,10 @@ public void testCustomSerialization() throws Exception { // Add the new customs to named writeables final List entries = ClusterModule.getNamedWriteables(); - entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TestCustomOne.TYPE, TestCustomOne::new)); - entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, TestCustomOne.TYPE, TestCustomOne::readDiffFrom)); - entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TestCustomTwo.TYPE, TestCustomTwo::new)); - entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, TestCustomTwo.TYPE, TestCustomTwo::readDiffFrom)); + entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TestSectionOne.TYPE, TestSectionOne::new)); + entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, TestSectionOne.TYPE, TestSectionOne::readDiffFrom)); + entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TestSectionTwo.TYPE, TestSectionTwo::new)); + entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, TestSectionTwo.TYPE, TestSectionTwo::readDiffFrom)); // serialize with current version BytesStreamOutput outStream = new BytesStreamOutput(); @@ -493,8 +493,8 @@ public void testCustomSerialization() throws Exception { ClusterState stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE); // Current version - Both the customs are non null - assertThat(stateAfterDiffs.custom(TestCustomOne.TYPE), notNullValue()); - assertThat(stateAfterDiffs.custom(TestCustomTwo.TYPE), notNullValue()); + assertThat(stateAfterDiffs.custom(TestSectionOne.TYPE), notNullValue()); + assertThat(stateAfterDiffs.custom(TestSectionTwo.TYPE), notNullValue()); // serialize with minimum compatibile version outStream = new BytesStreamOutput(); @@ -509,8 +509,8 @@ public void testCustomSerialization() throws Exception { stateAfterDiffs = serializedDiffs.apply(ClusterState.EMPTY_STATE); // Old version - TestCustomOne is null and TestCustomTwo is not null - assertThat(stateAfterDiffs.custom(TestCustomOne.TYPE), nullValue()); - assertThat(stateAfterDiffs.custom(TestCustomTwo.TYPE), 
notNullValue()); + assertThat(stateAfterDiffs.custom(TestSectionOne.TYPE), nullValue()); + assertThat(stateAfterDiffs.custom(TestSectionTwo.TYPE), notNullValue()); } } diff --git a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java index 4e12627a158da..92fb6143565f2 100644 --- a/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/GatewayMetaStateTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.plugins.ClusterCoordinationPlugin; import org.elasticsearch.plugins.MetadataUpgrader; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.TestCustomMetadata; +import org.elasticsearch.test.TestMetadataSection; import java.io.IOException; import java.util.Arrays; @@ -55,7 +55,7 @@ public void testUpdateTemplateMetadataOnUpgrade() { } public void testNoMetadataUpgrade() { - Metadata metadata = randomMetadata(new CustomMetadata1("data")); + Metadata metadata = randomMetadata(new MetadataSection1("data")); MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList()); Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader); assertSame(upgrade, metadata); @@ -66,7 +66,7 @@ public void testNoMetadataUpgrade() { } public void testCustomMetadataValidation() { - Metadata metadata = randomMetadata(new CustomMetadata1("data")); + Metadata metadata = randomMetadata(new MetadataSection1("data")); MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.emptyList()); try { GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader); @@ -87,7 +87,7 @@ public void testIndexMetadataUpgrade() { } public void testCustomMetadataNoChange() { - Metadata metadata = randomMetadata(new CustomMetadata1("data")); + Metadata metadata = randomMetadata(new MetadataSection1("data")); MetadataUpgrader metadataUpgrader = new MetadataUpgrader(Collections.singletonList(HashMap::new)); Metadata upgrade = GatewayMetaState.upgradeMetadata(metadata, new MockIndexMetadataVerifier(false), metadataUpgrader); assertSame(upgrade, metadata); @@ -204,10 +204,10 @@ public IndexMetadata verifyIndexMetadata(IndexMetadata indexMetadata, IndexVersi } } - private static class CustomMetadata1 extends TestCustomMetadata { + private static class MetadataSection1 extends TestMetadataSection { public static final String TYPE = "custom_md_1"; - CustomMetadata1(String data) { + MetadataSection1(String data) { super(data); } @@ -227,10 +227,10 @@ public EnumSet context() { } } - private static Metadata randomMetadata(TestCustomMetadata... customMetadatas) { + private static Metadata randomMetadata(TestMetadataSection... 
customMetadatas) { Metadata.Builder builder = Metadata.builder(); - for (TestCustomMetadata customMetadata : customMetadatas) { - builder.putCustom(customMetadata.getWriteableName(), customMetadata); + for (TestMetadataSection customMetadata : customMetadatas) { + builder.putSection(customMetadata.getWriteableName(), customMetadata); } for (int i = 0; i < randomIntBetween(1, 5); i++) { builder.put( diff --git a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java index 1227239277ef8..fd272c033efaa 100644 --- a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTaskExecutorTests.java @@ -23,7 +23,7 @@ import org.elasticsearch.features.FeatureService; import org.elasticsearch.health.HealthFeatures; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.TestThreadPool; @@ -204,16 +204,17 @@ private ClusterState stateWithNodeShuttingDown(ClusterState clusterState, Single ); return ClusterState.builder(clusterState) - .metadata(Metadata.builder(clusterState.metadata()).putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata).build()) + .metadata(Metadata.builder(clusterState.metadata()).putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata).build()) .build(); } private ClusterState stateWithHealthNodeSelectorTask(ClusterState clusterState) { ClusterState.Builder builder = ClusterState.builder(clusterState); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder(); tasks.addTask(HealthNode.TASK_NAME, HealthNode.TASK_NAME, new HealthNodeTaskParams(), NO_NODE_FOUND); - Metadata.Builder metadata = Metadata.builder(clusterState.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + Metadata.Builder metadata = Metadata.builder(clusterState.metadata()) + .putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); return builder.metadata(metadata).build(); } } diff --git a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTests.java b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTests.java index 5aec03b052cb6..81e26aff134ea 100644 --- a/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTests.java +++ b/server/src/test/java/org/elasticsearch/health/node/selection/HealthNodeTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import java.util.Set; @@ -54,10 +54,10 @@ public void testFindHealthNodeNoTask() { } public void testfindHealthNodeNoAssignment() { - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder(); 
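Aside: the health-node test helpers around here register a persistent task through the renamed PersistentTasksMetadataSection and attach it with putSection. A compact sketch of that flow; NO_NODE_FOUND is the statically imported constant these tests already use, and the final read-back via section(...) is an assumption based on the accessor the other sections use in this patch.

    static ClusterState withHealthNodeTask(ClusterState state) {
        PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder();
        tasks.addTask(HealthNode.TASK_NAME, HealthNode.TASK_NAME, HealthNodeTaskParams.INSTANCE, NO_NODE_FOUND);
        ClusterState updated = ClusterState.builder(state)
            .metadata(Metadata.builder(state.metadata()).putSection(PersistentTasksMetadataSection.TYPE, tasks.build()))
            .build();
        // read it back the same way the other metadata sections are read in this patch
        PersistentTasksMetadataSection readBack = updated.metadata().section(PersistentTasksMetadataSection.TYPE);
        assert readBack != null;
        return updated;
    }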
tasks.addTask(HealthNode.TASK_NAME, HealthNode.TASK_NAME, HealthNodeTaskParams.INSTANCE, NO_NODE_FOUND); ClusterState state = ClusterStateCreationUtils.state(node1, node1, allNodes) - .copyAndUpdateMetadata(b -> b.putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build())); + .copyAndUpdateMetadata(b -> b.putSection(PersistentTasksMetadataSection.TYPE, tasks.build())); assertThat(HealthNode.findHealthNode(state), nullValue()); } diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java index 7d6dd2d4e23ab..33d923f9e6e8a 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestMetadataTests.java @@ -9,7 +9,7 @@ package org.elasticsearch.ingest; import org.elasticsearch.cluster.DiffableUtils; -import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; import org.elasticsearch.common.util.Maps; @@ -52,9 +52,9 @@ public void testFromXContent() throws IOException { builder.endObject(); XContentBuilder shuffled = shuffleXContent(builder); try (XContentParser parser = createParser(shuffled)) { - Metadata.Custom custom = IngestMetadata.fromXContent(parser); - assertTrue(custom instanceof IngestMetadata); - IngestMetadata m = (IngestMetadata) custom; + MetadataSection section = IngestMetadata.fromXContent(parser); + assertTrue(section instanceof IngestMetadata); + IngestMetadata m = (IngestMetadata) section; assertEquals(2, m.getPipelines().size()); assertEquals("1", m.getPipelines().get("1").getId()); assertEquals("2", m.getPipelines().get("2").getId()); diff --git a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java index bc81614c9e237..282505813825c 100644 --- a/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java +++ b/server/src/test/java/org/elasticsearch/ingest/IngestServiceTests.java @@ -233,7 +233,7 @@ public void testUpdatePipelines() { {"processors": [{"set" : {"field": "_field", "value": "_value"}}]}"""), XContentType.JSON); IngestMetadata ingestMetadata = new IngestMetadata(Map.of("_id", pipeline)); clusterState = ClusterState.builder(clusterState) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, ingestMetadata)) .build(); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); assertThat(ingestService.pipelines().size(), is(1)); @@ -353,7 +353,7 @@ public void extraValidation() throws Exception { ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); ClusterState previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, ingestMetadata)) .build(); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); @@ -376,7 +376,7 @@ public void extraValidation() throws Exception { ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); ClusterState previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState) - 
.metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, ingestMetadata)) .build(); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); @@ -396,7 +396,7 @@ public void testDelete() { ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); ClusterState previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, ingestMetadata)) .build(); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); assertThat(ingestService.getPipeline("_id"), notNullValue()); @@ -793,7 +793,7 @@ public void testDeleteUsingWildcard() { ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); ClusterState previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, ingestMetadata)) .build(); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); assertThat(ingestService.getPipeline("p1"), notNullValue()); @@ -842,7 +842,7 @@ public void testDeleteWithExistingUnmatchedPipelines() { ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); ClusterState previousClusterState = clusterState; clusterState = ClusterState.builder(clusterState) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, ingestMetadata)) .build(); ingestService.applyClusterState(new ClusterChangedEvent("", clusterState, previousClusterState)); assertThat(ingestService.getPipeline("p1"), notNullValue()); @@ -867,7 +867,7 @@ public void testDeleteWithIndexUsePipeline() { true ); } - builder.putCustom(IngestMetadata.TYPE, ingestMetadata); + builder.putSection(IngestMetadata.TYPE, ingestMetadata); Metadata metadata = builder.build(); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).build(); @@ -2660,7 +2660,7 @@ private void testUpdatingPipeline(String pipelineString) throws Exception { var pipelineId = randomAlphaOfLength(5); var existingPipeline = new PipelineConfiguration(pipelineId, new BytesArray(pipelineString), XContentType.JSON); var clusterState = ClusterState.builder(new ClusterName("test")) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, new IngestMetadata(Map.of(pipelineId, existingPipeline))).build()) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, new IngestMetadata(Map.of(pipelineId, existingPipeline))).build()) .build(); Client client = mock(Client.class); @@ -2754,7 +2754,7 @@ public void testPutPipelineWithVersionedUpdateDoesNotMatchExistingPipeline() { var pipelineString = "{\"version\": " + version + ", \"processors\": []}"; var existingPipeline = new PipelineConfiguration(pipelineId, new BytesArray(pipelineString), XContentType.JSON); var clusterState = ClusterState.builder(new ClusterName("test")) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, new IngestMetadata(Map.of(pipelineId, existingPipeline))).build()) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, new IngestMetadata(Map.of(pipelineId, existingPipeline))).build()) .build(); final 
Integer requestedVersion = randomValueOtherThan(version, ESTestCase::randomInt); @@ -2780,7 +2780,7 @@ public void testPutPipelineWithVersionedUpdateSpecifiesSameVersion() throws Exce var pipelineString = "{\"version\": " + version + ", \"processors\": []}"; var existingPipeline = new PipelineConfiguration(pipelineId, new BytesArray(pipelineString), XContentType.JSON); var clusterState = ClusterState.builder(new ClusterName("test")) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, new IngestMetadata(Map.of(pipelineId, existingPipeline))).build()) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, new IngestMetadata(Map.of(pipelineId, existingPipeline))).build()) .build(); var request = new PutPipelineRequest(pipelineId, new BytesArray(pipelineString), XContentType.JSON, version); @@ -2794,7 +2794,7 @@ public void testPutPipelineWithVersionedUpdateSpecifiesValidVersion() throws Exc var pipelineString = "{\"version\": " + existingVersion + ", \"processors\": []}"; var existingPipeline = new PipelineConfiguration(pipelineId, new BytesArray(pipelineString), XContentType.JSON); var clusterState = ClusterState.builder(new ClusterName("test")) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, new IngestMetadata(Map.of(pipelineId, existingPipeline))).build()) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, new IngestMetadata(Map.of(pipelineId, existingPipeline))).build()) .build(); final int specifiedVersion = randomValueOtherThan(existingVersion, ESTestCase::randomInt); @@ -2802,7 +2802,7 @@ public void testPutPipelineWithVersionedUpdateSpecifiesValidVersion() throws Exc var request = new PutPipelineRequest(pipelineId, new BytesArray(updatedPipelineString), XContentType.JSON, existingVersion); var updatedState = executePut(request, clusterState); - var updatedConfig = ((IngestMetadata) updatedState.metadata().custom(IngestMetadata.TYPE)).getPipelines().get(pipelineId); + var updatedConfig = ((IngestMetadata) updatedState.metadata().section(IngestMetadata.TYPE)).getPipelines().get(pipelineId); assertThat(updatedConfig, notNullValue()); assertThat(updatedConfig.getVersion(), equalTo(specifiedVersion)); } @@ -2813,14 +2813,14 @@ public void testPutPipelineWithVersionedUpdateIncrementsVersion() throws Excepti var pipelineString = "{\"version\": " + existingVersion + ", \"processors\": []}"; var existingPipeline = new PipelineConfiguration(pipelineId, new BytesArray(pipelineString), XContentType.JSON); var clusterState = ClusterState.builder(new ClusterName("test")) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, new IngestMetadata(Map.of(pipelineId, existingPipeline))).build()) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, new IngestMetadata(Map.of(pipelineId, existingPipeline))).build()) .build(); var updatedPipelineString = "{\"processors\": []}"; var request = new PutPipelineRequest(pipelineId, new BytesArray(updatedPipelineString), XContentType.JSON, existingVersion); var updatedState = executePut(request, clusterState); - var updatedConfig = ((IngestMetadata) updatedState.metadata().custom(IngestMetadata.TYPE)).getPipelines().get(pipelineId); + var updatedConfig = ((IngestMetadata) updatedState.metadata().section(IngestMetadata.TYPE)).getPipelines().get(pipelineId); assertThat(updatedConfig, notNullValue()); assertThat(updatedConfig.getVersion(), equalTo(existingVersion + 1)); } diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java 
b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java index 199e72c3c3311..91bd9826ea4c5 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksClusterServiceTests.java @@ -31,8 +31,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; @@ -145,12 +145,12 @@ public void testReassignmentRequiredOnMetadataChanges() { .build(); boolean unassigned = randomBoolean(); - PersistentTasksCustomMetadata tasks = PersistentTasksCustomMetadata.builder() + PersistentTasksMetadataSection tasks = PersistentTasksMetadataSection.builder() .addTask("_task_1", TestPersistentTasksExecutor.NAME, null, new Assignment(unassigned ? null : "_node", "_reason")) .build(); Metadata metadata = Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, tasks) + .putSection(PersistentTasksMetadataSection.TYPE, tasks) .persistentSettings( Settings.builder() .put(EnableAssignmentDecider.CLUSTER_TASKS_ALLOCATION_ENABLE_SETTING.getKey(), allocation.toString()) @@ -191,14 +191,14 @@ public void testReassignmentRequiredOnMetadataChanges() { public void testReassignTasksWithNoTasks() { ClusterState clusterState = initialState(); - assertThat(reassign(clusterState).metadata().custom(PersistentTasksCustomMetadata.TYPE), nullValue()); + assertThat(reassign(clusterState).metadata().section(PersistentTasksMetadataSection.TYPE), nullValue()); } public void testReassignConsidersClusterStateUpdates() { ClusterState clusterState = initialState(); ClusterState.Builder builder = ClusterState.builder(clusterState); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder( - clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder( + clusterState.metadata().section(PersistentTasksMetadataSection.TYPE) ); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); addTestNodes(nodes, randomIntBetween(1, 10)); @@ -207,30 +207,32 @@ public void testReassignConsidersClusterStateUpdates() { addTask(tasks, "assign_one", randomBoolean() ? 
null : "no_longer_exists"); } - Metadata.Builder metadata = Metadata.builder(clusterState.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + Metadata.Builder metadata = Metadata.builder(clusterState.metadata()) + .putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); clusterState = builder.metadata(metadata).nodes(nodes).build(); ClusterState newClusterState = reassign(clusterState); - PersistentTasksCustomMetadata tasksInProgress = newClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasksInProgress = newClusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertThat(tasksInProgress, notNullValue()); } public void testNonClusterStateConditionAssignment() { ClusterState clusterState = initialState(); ClusterState.Builder builder = ClusterState.builder(clusterState); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder( - clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder( + clusterState.metadata().section(PersistentTasksMetadataSection.TYPE) ); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); addTestNodes(nodes, randomIntBetween(1, 3)); addTask(tasks, "assign_based_on_non_cluster_state_condition", null); - Metadata.Builder metadata = Metadata.builder(clusterState.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + Metadata.Builder metadata = Metadata.builder(clusterState.metadata()) + .putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); clusterState = builder.metadata(metadata).nodes(nodes).build(); nonClusterStateCondition = false; ClusterState newClusterState = reassign(clusterState); - PersistentTasksCustomMetadata tasksInProgress = newClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasksInProgress = newClusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertThat(tasksInProgress, notNullValue()); for (PersistentTask task : tasksInProgress.tasks()) { assertThat(task.getExecutorNode(), nullValue()); @@ -242,7 +244,7 @@ public void testNonClusterStateConditionAssignment() { nonClusterStateCondition = true; ClusterState finalClusterState = reassign(newClusterState); - tasksInProgress = finalClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + tasksInProgress = finalClusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertThat(tasksInProgress, notNullValue()); for (PersistentTask task : tasksInProgress.tasks()) { assertThat(task.getExecutorNode(), notNullValue()); @@ -255,8 +257,8 @@ public void testNonClusterStateConditionAssignment() { public void testReassignTasks() { ClusterState clusterState = initialState(); ClusterState.Builder builder = ClusterState.builder(clusterState); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder( - clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder( + clusterState.metadata().section(PersistentTasksMetadataSection.TYPE) ); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); addTestNodes(nodes, randomIntBetween(1, 10)); @@ -272,11 +274,12 @@ public void testReassignTasks() { case 2 -> addTask(tasks, "assign_one", randomBoolean() ? 
null : "no_longer_exists"); } } - Metadata.Builder metadata = Metadata.builder(clusterState.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + Metadata.Builder metadata = Metadata.builder(clusterState.metadata()) + .putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); clusterState = builder.metadata(metadata).nodes(nodes).build(); ClusterState newClusterState = reassign(clusterState); - PersistentTasksCustomMetadata tasksInProgress = newClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasksInProgress = newClusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertThat(tasksInProgress, notNullValue()); assertThat("number of tasks shouldn't change as a result or reassignment", numberOfTasks, equalTo(tasksInProgress.tasks().size())); @@ -290,7 +293,7 @@ public void testReassignTasks() { assertThat(task.getExecutorNode(), notNullValue()); assertThat(task.isAssigned(), equalTo(true)); if (clusterState.nodes().nodeExists(task.getExecutorNode()) == false) { - logger.info(clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE).toString()); + logger.info(clusterState.metadata().section(PersistentTasksMetadataSection.TYPE).toString()); } assertThat( "task should be assigned to a node that is in the cluster, was assigned to " + task.getExecutorNode(), @@ -333,13 +336,13 @@ public void testPersistentTasksChangedTaskAdded() { ClusterState previous = ClusterState.builder(new ClusterName("_name")).nodes(nodes).build(); - PersistentTasksCustomMetadata tasks = PersistentTasksCustomMetadata.builder() + PersistentTasksMetadataSection tasks = PersistentTasksMetadataSection.builder() .addTask("_task_1", "test", null, new Assignment(null, "_reason")) .build(); ClusterState current = ClusterState.builder(new ClusterName("_name")) .nodes(nodes) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasks)) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasks)) .build(); assertTrue("persistent tasks changed (task added)", persistentTasksChanged(new ClusterChangedEvent("test", current, previous))); @@ -351,7 +354,7 @@ public void testPersistentTasksChangedTaskRemoved() { .add(DiscoveryNodeUtils.create("_node_2")) .build(); - PersistentTasksCustomMetadata previousTasks = PersistentTasksCustomMetadata.builder() + PersistentTasksMetadataSection previousTasks = PersistentTasksMetadataSection.builder() .addTask("_task_1", "test", null, new Assignment("_node_1", "_reason")) .addTask("_task_2", "test", null, new Assignment("_node_1", "_reason")) .addTask("_task_3", "test", null, new Assignment("_node_2", "_reason")) @@ -359,17 +362,17 @@ public void testPersistentTasksChangedTaskRemoved() { ClusterState previous = ClusterState.builder(new ClusterName("_name")) .nodes(nodes) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, previousTasks)) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, previousTasks)) .build(); - PersistentTasksCustomMetadata currentTasks = PersistentTasksCustomMetadata.builder() + PersistentTasksMetadataSection currentTasks = PersistentTasksMetadataSection.builder() .addTask("_task_1", "test", null, new Assignment("_node_1", "_reason")) .addTask("_task_3", "test", null, new Assignment("_node_2", "_reason")) .build(); ClusterState current = ClusterState.builder(new ClusterName("_name")) .nodes(nodes) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, 
currentTasks)) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, currentTasks)) .build(); assertTrue("persistent tasks changed (task removed)", persistentTasksChanged(new ClusterChangedEvent("test", current, previous))); @@ -381,24 +384,24 @@ public void testPersistentTasksAssigned() { .add(DiscoveryNodeUtils.create("_node_2")) .build(); - PersistentTasksCustomMetadata previousTasks = PersistentTasksCustomMetadata.builder() + PersistentTasksMetadataSection previousTasks = PersistentTasksMetadataSection.builder() .addTask("_task_1", "test", null, new Assignment("_node_1", "")) .addTask("_task_2", "test", null, new Assignment(null, "unassigned")) .build(); ClusterState previous = ClusterState.builder(new ClusterName("_name")) .nodes(nodes) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, previousTasks)) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, previousTasks)) .build(); - PersistentTasksCustomMetadata currentTasks = PersistentTasksCustomMetadata.builder() + PersistentTasksMetadataSection currentTasks = PersistentTasksMetadataSection.builder() .addTask("_task_1", "test", null, new Assignment("_node_1", "")) .addTask("_task_2", "test", null, new Assignment("_node_2", "")) .build(); ClusterState current = ClusterState.builder(new ClusterName("_name")) .nodes(nodes) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, currentTasks)) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, currentTasks)) .build(); assertTrue("persistent tasks changed (task assigned)", persistentTasksChanged(new ClusterChangedEvent("test", current, previous))); @@ -418,13 +421,14 @@ public void testNeedsReassignment() { public void testPeriodicRecheck() throws Exception { ClusterState initialState = initialState(); ClusterState.Builder builder = ClusterState.builder(initialState); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder( - initialState.metadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder( + initialState.metadata().section(PersistentTasksMetadataSection.TYPE) ); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(initialState.nodes()); addTestNodes(nodes, randomIntBetween(1, 3)); addTask(tasks, "assign_based_on_non_cluster_state_condition", null); - Metadata.Builder metadata = Metadata.builder(initialState.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + Metadata.Builder metadata = Metadata.builder(initialState.metadata()) + .putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); ClusterState clusterState = builder.metadata(metadata).nodes(nodes).build(); nonClusterStateCondition = false; @@ -441,7 +445,7 @@ public void testPeriodicRecheck() throws Exception { ClusterState newClusterState = recheckTestClusterService.state(); { - PersistentTasksCustomMetadata tasksInProgress = newClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasksInProgress = newClusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertThat(tasksInProgress, notNullValue()); for (PersistentTask task : tasksInProgress.tasks()) { assertThat(task.getExecutorNode(), nullValue()); @@ -462,9 +466,9 @@ public void testPeriodicRecheck() throws Exception { service.setRecheckInterval(TimeValue.timeValueMillis(1)); assertBusy(() -> { - PersistentTasksCustomMetadata 
tasksInProgress = recheckTestClusterService.state() + PersistentTasksMetadataSection tasksInProgress = recheckTestClusterService.state() .getMetadata() - .custom(PersistentTasksCustomMetadata.TYPE); + .section(PersistentTasksMetadataSection.TYPE); assertThat(tasksInProgress, notNullValue()); for (PersistentTask task : tasksInProgress.tasks()) { assertThat(task.getExecutorNode(), notNullValue()); @@ -478,13 +482,14 @@ public void testPeriodicRecheck() throws Exception { public void testPeriodicRecheckOffMaster() { ClusterState initialState = initialState(); ClusterState.Builder builder = ClusterState.builder(initialState); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder( - initialState.metadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder( + initialState.metadata().section(PersistentTasksMetadataSection.TYPE) ); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(initialState.nodes()); addTestNodes(nodes, randomIntBetween(1, 3)); addTask(tasks, "assign_based_on_non_cluster_state_condition", null); - Metadata.Builder metadata = Metadata.builder(initialState.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + Metadata.Builder metadata = Metadata.builder(initialState.metadata()) + .putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); ClusterState clusterState = builder.metadata(metadata).nodes(nodes).build(); nonClusterStateCondition = false; @@ -499,7 +504,7 @@ public void testPeriodicRecheckOffMaster() { ClusterState newClusterState = recheckTestClusterService.state(); { - PersistentTasksCustomMetadata tasksInProgress = newClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasksInProgress = newClusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertThat(tasksInProgress, notNullValue()); for (PersistentTask task : tasksInProgress.tasks()) { assertThat(task.getExecutorNode(), nullValue()); @@ -529,8 +534,8 @@ public void testPeriodicRecheckOffMaster() { public void testUnassignTask() throws InterruptedException { ClusterState clusterState = initialState(); ClusterState.Builder builder = ClusterState.builder(clusterState); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder( - clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder( + clusterState.metadata().section(PersistentTasksMetadataSection.TYPE) ); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder() .add(DiscoveryNodeUtils.create("_node_1")) @@ -540,7 +545,8 @@ public void testUnassignTask() throws InterruptedException { String unassignedId = addTask(tasks, "unassign", "_node_2"); - Metadata.Builder metadata = Metadata.builder(clusterState.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + Metadata.Builder metadata = Metadata.builder(clusterState.metadata()) + .putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); clusterState = builder.metadata(metadata).nodes(nodes).build(); setState(clusterService, clusterState); PersistentTasksClusterService service = createService((params, candidateNodes, currentState) -> new Assignment("_node_2", "test")); @@ -563,8 +569,8 @@ public void testUnassignTask() throws InterruptedException { public void testUnassignNonExistentTask() throws InterruptedException { ClusterState clusterState = 
initialState(); ClusterState.Builder builder = ClusterState.builder(clusterState); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder( - clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder( + clusterState.metadata().section(PersistentTasksMetadataSection.TYPE) ); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder() .add(DiscoveryNodeUtils.create("_node_1")) @@ -572,7 +578,8 @@ public void testUnassignNonExistentTask() throws InterruptedException { .masterNodeId("_node_1") .add(DiscoveryNodeUtils.create("_node_2")); - Metadata.Builder metadata = Metadata.builder(clusterState.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + Metadata.Builder metadata = Metadata.builder(clusterState.metadata()) + .putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); clusterState = builder.metadata(metadata).nodes(nodes).build(); setState(clusterService, clusterState); PersistentTasksClusterService service = createService((params, candidateNodes, currentState) -> new Assignment("_node_2", "test")); @@ -598,8 +605,8 @@ public void testTasksNotAssignedToShuttingDownNodes() { )) { ClusterState clusterState = initialState(); ClusterState.Builder builder = ClusterState.builder(clusterState); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder( - clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder( + clusterState.metadata().section(PersistentTasksMetadataSection.TYPE) ); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(clusterState.nodes()); addTestNodes(nodes, randomIntBetween(2, 10)); @@ -613,7 +620,7 @@ public void testTasksNotAssignedToShuttingDownNodes() { } Metadata.Builder metadata = Metadata.builder(clusterState.metadata()) - .putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + .putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); clusterState = builder.metadata(metadata).nodes(nodes).build(); // Now that we have a bunch of tasks that need to be assigned, let's @@ -639,7 +646,7 @@ public void testTasksNotAssignedToShuttingDownNodes() { ClusterState shutdownState = ClusterState.builder(clusterState) .metadata( Metadata.builder(clusterState.metadata()) - .putCustom(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(shutdownMetadataMap)) + .putSection(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(shutdownMetadataMap)) .build() ) .build(); @@ -647,7 +654,7 @@ public void testTasksNotAssignedToShuttingDownNodes() { logger.info("--> assigning after marking nodes as shutting down"); nonClusterStateCondition = randomBoolean(); clusterState = reassign(shutdownState); - PersistentTasksCustomMetadata tasksInProgress = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasksInProgress = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertThat(tasksInProgress, notNullValue()); Set nodesWithTasks = tasksInProgress.tasks() .stream() @@ -668,13 +675,14 @@ public void testReassignOnlyOnce() throws Exception { CountDownLatch latch = new CountDownLatch(1); ClusterState initialState = initialState(); ClusterState.Builder builder = ClusterState.builder(initialState); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder( - 
initialState.metadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder( + initialState.metadata().section(PersistentTasksMetadataSection.TYPE) ); DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(initialState.nodes()); addTestNodes(nodes, randomIntBetween(1, 3)); addTask(tasks, "assign_based_on_non_cluster_state_condition", null); - Metadata.Builder metadata = Metadata.builder(initialState.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + Metadata.Builder metadata = Metadata.builder(initialState.metadata()) + .putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); ClusterState clusterState = builder.metadata(metadata).nodes(nodes).build(); boolean shouldSimulateFailure = randomBoolean(); @@ -788,7 +796,7 @@ private ClusterState reassign(ClusterState clusterState) { private Assignment assignOnlyOneTaskAtATime(Collection candidateNodes, ClusterState clusterState) { DiscoveryNodes nodes = clusterState.nodes(); - PersistentTasksCustomMetadata tasksInProgress = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasksInProgress = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); if (tasksInProgress.findTasks( TestPersistentTasksExecutor.NAME, task -> "assign_one".equals(((TestParams) task.getParams()).getTestParam()) @@ -824,12 +832,12 @@ private String dumpEvent(ClusterChangedEvent event) { + " routing_table_changed:" + event.routingTableChanged() + " tasks: " - + event.state().metadata().custom(PersistentTasksCustomMetadata.TYPE); + + event.state().metadata().section(PersistentTasksMetadataSection.TYPE); } private ClusterState significantChange(ClusterState clusterState) { ClusterState.Builder builder = ClusterState.builder(clusterState); - PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); if (tasks != null) { if (randomBoolean()) { for (PersistentTask task : tasks.tasks()) { @@ -847,14 +855,14 @@ private ClusterState significantChange(ClusterState clusterState) { // we don't have any unassigned tasks - add some if (randomBoolean()) { logger.info("added random task"); - addRandomTask(builder, Metadata.builder(clusterState.metadata()), PersistentTasksCustomMetadata.builder(tasks), null); + addRandomTask(builder, Metadata.builder(clusterState.metadata()), PersistentTasksMetadataSection.builder(tasks), null); tasksOrNodesChanged = true; } else { logger.info("added unassignable task with custom assignment message"); addRandomTask( builder, Metadata.builder(clusterState.metadata()), - PersistentTasksCustomMetadata.builder(tasks), + PersistentTasksMetadataSection.builder(tasks), new Assignment(null, "change me"), "never_assign" ); @@ -882,10 +890,10 @@ private ClusterState significantChange(ClusterState clusterState) { return builder.build(); } - private PersistentTasksCustomMetadata removeTasksWithChangingAssignment(PersistentTasksCustomMetadata tasks) { + private PersistentTasksMetadataSection removeTasksWithChangingAssignment(PersistentTasksMetadataSection tasks) { if (tasks != null) { boolean changed = false; - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(tasks); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(tasks); for (PersistentTask 
task : tasks.tasks()) { // Remove all unassigned tasks that cause changing assignments they might trigger a significant change if ("never_assign".equals(((TestParams) task.getParams()).getTestParam()) @@ -904,9 +912,9 @@ private PersistentTasksCustomMetadata removeTasksWithChangingAssignment(Persiste private ClusterState insignificantChange(ClusterState clusterState) { ClusterState.Builder builder = ClusterState.builder(clusterState); - PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); tasks = removeTasksWithChangingAssignment(tasks); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(tasks); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(tasks); if (randomBoolean()) { if (hasAssignableTasks(tasks, clusterState.nodes()) == false) { @@ -922,7 +930,7 @@ private ClusterState insignificantChange(ClusterState clusterState) { } logger.info("changed routing table"); Metadata.Builder metadata = Metadata.builder(clusterState.metadata()); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()); RoutingTable.Builder routingTable = RoutingTable.builder( TestShardRoutingRoleStrategies.DEFAULT_ROLE_ONLY, clusterState.routingTable() @@ -947,11 +955,11 @@ private ClusterState insignificantChange(ClusterState clusterState) { if (randomBoolean()) { logger.info("removed all tasks"); Metadata.Builder metadata = Metadata.builder(clusterState.metadata()) - .putCustom(PersistentTasksCustomMetadata.TYPE, PersistentTasksCustomMetadata.builder().build()); + .putSection(PersistentTasksMetadataSection.TYPE, PersistentTasksMetadataSection.builder().build()); return builder.metadata(metadata).build(); } else { logger.info("set task custom to null"); - Metadata.Builder metadata = Metadata.builder(clusterState.metadata()).removeCustom(PersistentTasksCustomMetadata.TYPE); + Metadata.Builder metadata = Metadata.builder(clusterState.metadata()).removeSection(PersistentTasksMetadataSection.TYPE); return builder.metadata(metadata).build(); } } @@ -971,11 +979,11 @@ private ClusterState insignificantChange(ClusterState clusterState) { .build(); Metadata.Builder metadata = Metadata.builder(clusterState.metadata()) .put(indexMetadata, false) - .putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()); + .putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()); return builder.metadata(metadata).build(); } - private boolean hasAssignableTasks(PersistentTasksCustomMetadata tasks, DiscoveryNodes discoveryNodes) { + private boolean hasAssignableTasks(PersistentTasksMetadataSection tasks, DiscoveryNodes discoveryNodes) { if (tasks == null || tasks.tasks().isEmpty()) { return false; } @@ -987,14 +995,14 @@ private boolean hasAssignableTasks(PersistentTasksCustomMetadata tasks, Discover }); } - private boolean hasTasksAssignedTo(PersistentTasksCustomMetadata tasks, String nodeId) { + private boolean hasTasksAssignedTo(PersistentTasksMetadataSection tasks, String nodeId) { return tasks != null && tasks.tasks().stream().anyMatch(task -> nodeId.equals(task.getExecutorNode())) == false; } private ClusterState.Builder addRandomTask( ClusterState.Builder clusterStateBuilder, Metadata.Builder metadata, - PersistentTasksCustomMetadata.Builder tasks, + 
PersistentTasksMetadataSection.Builder tasks, String node ) { return addRandomTask(clusterStateBuilder, metadata, tasks, new Assignment(node, randomAlphaOfLength(10)), randomAlphaOfLength(10)); @@ -1003,19 +1011,19 @@ private ClusterState.Builder addRandomTask( private ClusterState.Builder addRandomTask( ClusterState.Builder clusterStateBuilder, Metadata.Builder metadata, - PersistentTasksCustomMetadata.Builder tasks, + PersistentTasksMetadataSection.Builder tasks, Assignment assignment, String param ) { return clusterStateBuilder.metadata( - metadata.putCustom( - PersistentTasksCustomMetadata.TYPE, + metadata.putSection( + PersistentTasksMetadataSection.TYPE, tasks.addTask(UUIDs.base64UUID(), TestPersistentTasksExecutor.NAME, new TestParams(param), assignment).build() ) ); } - private String addTask(PersistentTasksCustomMetadata.Builder tasks, String param, String node) { + private String addTask(PersistentTasksMetadataSection.Builder tasks, String param, String node) { String id = UUIDs.base64UUID(); tasks.addTask(id, TestPersistentTasksExecutor.NAME, new TestParams(param), new Assignment(node, "explanation: " + param)); return id; diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java index 30e3a5e2f5218..75bb0f02c9ca7 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksDecidersTestCase.java @@ -93,12 +93,12 @@ protected static ClusterState createClusterStateWithTasks(final int nbNodes, fin nodes.add(DiscoveryNodeUtils.create("_node_" + i)); } - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder(); for (int i = 0; i < nbTasks; i++) { - tasks.addTask("_task_" + i, "test", null, new PersistentTasksCustomMetadata.Assignment(null, "initialized")); + tasks.addTask("_task_" + i, "test", null, new PersistentTasksMetadataSection.Assignment(null, "initialized")); } - Metadata metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()).build(); + Metadata metadata = Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasks.build()).build(); return ClusterState.builder(ClusterName.DEFAULT).nodes(nodes).metadata(metadata).build(); } @@ -106,7 +106,7 @@ protected static ClusterState createClusterStateWithTasks(final int nbNodes, fin /** Asserts that the given cluster state contains nbTasks tasks that are assigned **/ @SuppressWarnings("rawtypes") protected static void assertNbAssignedTasks(final long nbTasks, final ClusterState clusterState) { - assertPersistentTasks(nbTasks, clusterState, PersistentTasksCustomMetadata.PersistentTask::isAssigned); + assertPersistentTasks(nbTasks, clusterState, PersistentTasksMetadataSection.PersistentTask::isAssigned); } /** Asserts that the given cluster state contains nbTasks tasks that are NOT assigned **/ @@ -119,9 +119,9 @@ protected static void assertNbUnassignedTasks(final long nbTasks, final ClusterS protected static void assertPersistentTasks( final long nbTasks, final ClusterState clusterState, - final Predicate predicate + final Predicate predicate ) { - PersistentTasksCustomMetadata tasks = clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = 
clusterState.metadata().section(PersistentTasksMetadataSection.TYPE); assertNotNull("Persistent tasks must be not null", tasks); assertEquals(nbTasks, tasks.tasks().stream().filter(predicate).count()); } diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorResponseTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorResponseTests.java index 0fec0a9bc8e57..f3d166a7cf5f5 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorResponseTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksExecutorResponseTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.test.AbstractWireSerializingTestCase; @@ -27,7 +27,7 @@ protected PersistentTaskResponse createTestInstance() { TestPersistentTasksExecutor.NAME, new TestPersistentTasksPlugin.TestParams("test"), randomLong(), - PersistentTasksCustomMetadata.INITIAL_ASSIGNMENT + PersistentTasksMetadataSection.INITIAL_ASSIGNMENT ) ); } else { diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetadataTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksMetadataSectionTests.java similarity index 78% rename from server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetadataTests.java rename to server/src/test/java/org/elasticsearch/persistent/PersistentTasksMetadataSectionTests.java index 233c1f0dc4244..a73239cc02a93 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksCustomMetadataTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksMetadataSectionTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.Metadata.Custom; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.UUIDs; @@ -26,9 +26,9 @@ import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Builder; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Builder; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.persistent.TestPersistentTasksPlugin.State; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; @@ -58,12 +58,12 @@ import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.sameInstance; -public class 
PersistentTasksCustomMetadataTests extends ChunkedToXContentDiffableSerializationTestCase { +public class PersistentTasksMetadataSectionTests extends ChunkedToXContentDiffableSerializationTestCase { @Override - protected PersistentTasksCustomMetadata createTestInstance() { + protected PersistentTasksMetadataSection createTestInstance() { int numberOfTasks = randomInt(10); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder(); for (int i = 0; i < numberOfTasks; i++) { String taskId = UUIDs.base64UUID(); tasks.addTask(taskId, TestPersistentTasksExecutor.NAME, new TestParams(randomAlphaOfLength(10)), randomAssignment()); @@ -76,21 +76,21 @@ protected PersistentTasksCustomMetadata createTestInstance() { } @Override - protected Custom mutateInstance(Custom instance) { + protected MetadataSection mutateInstance(MetadataSection instance) { return null;// TODO implement https://github.com/elastic/elasticsearch/issues/25929 } @Override - protected Writeable.Reader instanceReader() { - return PersistentTasksCustomMetadata::new; + protected Writeable.Reader instanceReader() { + return PersistentTasksMetadataSection::new; } @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { return new NamedWriteableRegistry( Arrays.asList( - new Entry(Metadata.Custom.class, PersistentTasksCustomMetadata.TYPE, PersistentTasksCustomMetadata::new), - new Entry(NamedDiff.class, PersistentTasksCustomMetadata.TYPE, PersistentTasksCustomMetadata::readDiffFrom), + new Entry(MetadataSection.class, PersistentTasksMetadataSection.TYPE, PersistentTasksMetadataSection::new), + new Entry(NamedDiff.class, PersistentTasksMetadataSection.TYPE, PersistentTasksMetadataSection::readDiffFrom), new Entry(PersistentTaskParams.class, TestPersistentTasksExecutor.NAME, TestParams::new), new Entry(PersistentTaskState.class, TestPersistentTasksExecutor.NAME, State::new) ) @@ -98,8 +98,8 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() { } @Override - protected Custom makeTestChanges(Custom testInstance) { - Builder builder = PersistentTasksCustomMetadata.builder((PersistentTasksCustomMetadata) testInstance); + protected MetadataSection makeTestChanges(MetadataSection testInstance) { + Builder builder = PersistentTasksMetadataSection.builder((PersistentTasksMetadataSection) testInstance); switch (randomInt(3)) { case 0: addRandomTask(builder); @@ -130,13 +130,13 @@ protected Custom makeTestChanges(Custom testInstance) { } @Override - protected Writeable.Reader> diffReader() { - return PersistentTasksCustomMetadata::readDiffFrom; + protected Writeable.Reader> diffReader() { + return PersistentTasksMetadataSection::readDiffFrom; } @Override - protected PersistentTasksCustomMetadata doParseInstance(XContentParser parser) { - return PersistentTasksCustomMetadata.fromXContent(parser); + protected PersistentTasksMetadataSection doParseInstance(XContentParser parser) { + return PersistentTasksMetadataSection.fromXContent(parser); } private String addRandomTask(Builder builder) { @@ -145,7 +145,7 @@ private String addRandomTask(Builder builder) { return taskId; } - private String pickRandomTask(PersistentTasksCustomMetadata.Builder testInstance) { + private String pickRandomTask(PersistentTasksMetadataSection.Builder testInstance) { return randomFrom(new ArrayList<>(testInstance.getCurrentTaskIds())); } @@ -169,9 +169,9 @@ protected NamedXContentRegistry xContentRegistry() { 
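// Editor's sketch (not part of the diff): how the renamed section is registered for (de)serialization,
// mirroring getNamedWriteableRegistry() in the hunk above. Assumes the stream constructor and the
// readDiffFrom factory keep their old shapes; TestParams, State and TestPersistentTasksExecutor are
// the test-plugin classes already referenced in this file.
NamedWriteableRegistry registry = new NamedWriteableRegistry(
    Arrays.asList(
        new NamedWriteableRegistry.Entry(MetadataSection.class, PersistentTasksMetadataSection.TYPE, PersistentTasksMetadataSection::new),
        new NamedWriteableRegistry.Entry(NamedDiff.class, PersistentTasksMetadataSection.TYPE, PersistentTasksMetadataSection::readDiffFrom),
        new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TestPersistentTasksExecutor.NAME, TestParams::new),
        new NamedWriteableRegistry.Entry(PersistentTaskState.class, TestPersistentTasksExecutor.NAME, State::new)
    )
);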
@SuppressWarnings("unchecked") public void testSerializationContext() throws Exception { - PersistentTasksCustomMetadata testInstance = createTestInstance(); + PersistentTasksMetadataSection testInstance = createTestInstance(); for (int i = 0; i < randomInt(10); i++) { - testInstance = (PersistentTasksCustomMetadata) makeTestChanges(testInstance); + testInstance = (PersistentTasksMetadataSection) makeTestChanges(testInstance); } ToXContent.MapParams params = new ToXContent.MapParams( @@ -181,7 +181,7 @@ public void testSerializationContext() throws Exception { XContentType xContentType = randomFrom(XContentType.values()); BytesReference shuffled = toShuffledXContent(asXContent(testInstance), xContentType, params, false); - PersistentTasksCustomMetadata newInstance; + PersistentTasksMetadataSection newInstance; try (XContentParser parser = createParser(XContentFactory.xContent(xContentType), shuffled)) { newInstance = doParseInstance(parser); } @@ -205,14 +205,14 @@ public void testSerializationContext() throws Exception { } public void testBuilder() { - PersistentTasksCustomMetadata persistentTasks = null; + PersistentTasksMetadataSection persistentTasks = null; String lastKnownTask = ""; for (int i = 0; i < randomIntBetween(10, 100); i++) { final Builder builder; if (randomBoolean()) { - builder = PersistentTasksCustomMetadata.builder(); + builder = PersistentTasksMetadataSection.builder(); } else { - builder = PersistentTasksCustomMetadata.builder(persistentTasks); + builder = PersistentTasksMetadataSection.builder(persistentTasks); } boolean changed = false; for (int j = 0; j < randomIntBetween(1, 10); j++) { @@ -256,7 +256,7 @@ public void testBuilder() { } public void testMinVersionSerialization() throws IOException { - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder(); TransportVersion minVersion = getFirstVersion(); TransportVersion streamVersion = randomVersionBetween(random(), minVersion, getPreviousVersion(TransportVersion.current())); @@ -287,7 +287,7 @@ public void testMinVersionSerialization() throws IOException { final StreamInput input = out.bytes().streamInput(); input.setTransportVersion(streamVersion); - PersistentTasksCustomMetadata read = new PersistentTasksCustomMetadata( + PersistentTasksMetadataSection read = new PersistentTasksMetadataSection( new NamedWriteableAwareStreamInput(input, getNamedWriteableRegistry()) ); @@ -296,7 +296,7 @@ public void testMinVersionSerialization() throws IOException { public void testDisassociateDeadNodes_givenNoPersistentTasks() { ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")).build(); - ClusterState returnedState = PersistentTasksCustomMetadata.disassociateDeadNodes(originalState); + ClusterState returnedState = PersistentTasksMetadataSection.disassociateDeadNodes(originalState); assertThat(originalState, sameInstance(returnedState)); } @@ -308,23 +308,23 @@ public void testDisassociateDeadNodes_givenAssignedPersistentTask() { .build(); String taskName = "test/task"; - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder() + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder() .addTask( "task-id", taskName, emptyTaskParams(taskName), - new PersistentTasksCustomMetadata.Assignment("node1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node1", "test assignment") ); 
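// Editor's sketch (not part of the diff): the contract exercised by the disassociateDeadNodes tests
// around this hunk. Assumes the renamed class keeps the static disassociateDeadNodes helper and the
// LOST_NODE_ASSIGNMENT constant; `nodes` and `params` are hypothetical placeholders for the
// DiscoveryNodes and PersistentTaskParams the tests build elsewhere.
PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder()
    .addTask("task-on-deceased-node", "test/task", params,
        new PersistentTasksMetadataSection.Assignment("left-the-cluster", "test assignment"));
ClusterState before = ClusterState.builder(new ClusterName("persistent-tasks-tests"))
    .nodes(nodes) // "left-the-cluster" is not among these discovery nodes
    .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()))
    .build();
ClusterState after = PersistentTasksMetadataSection.disassociateDeadNodes(before);
// The orphaned task survives but is re-marked with LOST_NODE_ASSIGNMENT so it can be reassigned later.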
ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")) .nodes(nodes) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); - ClusterState returnedState = PersistentTasksCustomMetadata.disassociateDeadNodes(originalState); + ClusterState returnedState = PersistentTasksMetadataSection.disassociateDeadNodes(originalState); assertThat(originalState, sameInstance(returnedState)); - PersistentTasksCustomMetadata originalTasks = PersistentTasksCustomMetadata.getPersistentTasksCustomMetadata(originalState); - PersistentTasksCustomMetadata returnedTasks = PersistentTasksCustomMetadata.getPersistentTasksCustomMetadata(returnedState); + PersistentTasksMetadataSection originalTasks = PersistentTasksMetadataSection.getPersistentTasksCustomMetadata(originalState); + PersistentTasksMetadataSection returnedTasks = PersistentTasksMetadataSection.getPersistentTasksCustomMetadata(returnedState); assertEquals(originalTasks, returnedTasks); } @@ -336,34 +336,34 @@ public void testDisassociateDeadNodes() { .build(); String taskName = "test/task"; - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder() + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder() .addTask( "assigned-task", taskName, emptyTaskParams(taskName), - new PersistentTasksCustomMetadata.Assignment("node1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node1", "test assignment") ) .addTask( "task-on-deceased-node", taskName, emptyTaskParams(taskName), - new PersistentTasksCustomMetadata.Assignment("left-the-cluster", "test assignment") + new PersistentTasksMetadataSection.Assignment("left-the-cluster", "test assignment") ); ClusterState originalState = ClusterState.builder(new ClusterName("persistent-tasks-tests")) .nodes(nodes) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); - ClusterState returnedState = PersistentTasksCustomMetadata.disassociateDeadNodes(originalState); + ClusterState returnedState = PersistentTasksMetadataSection.disassociateDeadNodes(originalState); assertThat(originalState, not(sameInstance(returnedState))); - PersistentTasksCustomMetadata originalTasks = PersistentTasksCustomMetadata.getPersistentTasksCustomMetadata(originalState); - PersistentTasksCustomMetadata returnedTasks = PersistentTasksCustomMetadata.getPersistentTasksCustomMetadata(returnedState); + PersistentTasksMetadataSection originalTasks = PersistentTasksMetadataSection.getPersistentTasksCustomMetadata(originalState); + PersistentTasksMetadataSection returnedTasks = PersistentTasksMetadataSection.getPersistentTasksCustomMetadata(returnedState); assertNotEquals(originalTasks, returnedTasks); assertEquals(originalTasks.getTask("assigned-task"), returnedTasks.getTask("assigned-task")); assertNotEquals(originalTasks.getTask("task-on-deceased-node"), returnedTasks.getTask("task-on-deceased-node")); - assertEquals(PersistentTasksCustomMetadata.LOST_NODE_ASSIGNMENT, returnedTasks.getTask("task-on-deceased-node").getAssignment()); + assertEquals(PersistentTasksMetadataSection.LOST_NODE_ASSIGNMENT, returnedTasks.getTask("task-on-deceased-node").getAssignment()); } private PersistentTaskParams 
emptyTaskParams(String taskName) { diff --git a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java index 8178505470d9a..52d51e110564e 100644 --- a/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java +++ b/server/src/test/java/org/elasticsearch/persistent/PersistentTasksNodeServiceTests.java @@ -23,8 +23,8 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestParams; import org.elasticsearch.persistent.TestPersistentTasksPlugin.TestPersistentTasksExecutor; import org.elasticsearch.tasks.Task; @@ -118,7 +118,7 @@ public void testStartTask() { ClusterState state = createInitialClusterState(nonLocalNodesCount, Settings.EMPTY); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder(); boolean added = false; if (nonLocalNodesCount > 0) { for (int i = 0; i < randomInt(5); i++) { @@ -145,7 +145,7 @@ public void testStartTask() { } Metadata.Builder metadata = Metadata.builder(state.metadata()); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); ClusterState newClusterState = ClusterState.builder(state).metadata(metadata).build(); coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state)); @@ -235,13 +235,13 @@ public void testParamsStatusAndNodeTaskAreDelegated() throws Exception { ClusterState state = createInitialClusterState(1, Settings.EMPTY); PersistentTaskState taskState = new TestPersistentTasksPlugin.State("_test_phase"); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder(); String taskId = UUIDs.base64UUID(); TestParams taskParams = new TestParams("other_0"); tasks.addTask(taskId, TestPersistentTasksExecutor.NAME, taskParams, new Assignment("this_node", "test assignment on other node")); tasks.updateTaskState(taskId, taskState); Metadata.Builder metadata = Metadata.builder(state.metadata()); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); ClusterState newClusterState = ClusterState.builder(state).metadata(metadata).build(); coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state)); @@ -422,7 +422,7 @@ public void sendCompletionRequest( assertThat(capturedTaskId.get(), equalTo(persistentId)); assertThat(capturedLocalAbortReason.get(), equalTo("testing local abort")); // Notify successful unassignment - PersistentTasksCustomMetadata persistentTasksMetadata = newClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasksMetadata = 
newClusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); capturedListener.get().onResponse(persistentTasksMetadata.getTask(persistentId)); // Check the task is now removed from the local task manager @@ -511,7 +511,7 @@ public void sendCompletionRequest( ClusterState state = createInitialClusterState(0, Settings.EMPTY); - PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder(); tasks.addTask( UUIDs.base64UUID(), @@ -521,7 +521,7 @@ public void sendCompletionRequest( ); Metadata.Builder metadata = Metadata.builder(state.metadata()); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build()); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks.build()); ClusterState newClusterState = ClusterState.builder(state).metadata(metadata).build(); coordinator.clusterChanged(new ClusterChangedEvent("test", newClusterState, state)); @@ -533,14 +533,14 @@ public void sendCompletionRequest( } private ClusterState addTask(ClusterState state, String action, Params params, String node) { - PersistentTasksCustomMetadata.Builder builder = PersistentTasksCustomMetadata.builder( - state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder builder = PersistentTasksMetadataSection.builder( + state.getMetadata().section(PersistentTasksMetadataSection.TYPE) ); return ClusterState.builder(state) .metadata( Metadata.builder(state.metadata()) - .putCustom( - PersistentTasksCustomMetadata.TYPE, + .putSection( + PersistentTasksMetadataSection.TYPE, builder.addTask(UUIDs.base64UUID(), action, params, new Assignment(node, "test assignment")).build() ) ) @@ -548,15 +548,15 @@ private ClusterState addTask(ClusterState } private ClusterState reallocateTask(ClusterState state, String taskId, String node) { - PersistentTasksCustomMetadata.Builder builder = PersistentTasksCustomMetadata.builder( - state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder builder = PersistentTasksMetadataSection.builder( + state.getMetadata().section(PersistentTasksMetadataSection.TYPE) ); assertTrue(builder.hasTask(taskId)); return ClusterState.builder(state) .metadata( Metadata.builder(state.metadata()) - .putCustom( - PersistentTasksCustomMetadata.TYPE, + .putSection( + PersistentTasksMetadataSection.TYPE, builder.reassignTask(taskId, new Assignment(node, "test assignment")).build() ) ) @@ -564,12 +564,14 @@ private ClusterState reallocateTask(ClusterState state, String taskId, String no } private ClusterState removeTask(ClusterState state, String taskId) { - PersistentTasksCustomMetadata.Builder builder = PersistentTasksCustomMetadata.builder( - state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE) + PersistentTasksMetadataSection.Builder builder = PersistentTasksMetadataSection.builder( + state.getMetadata().section(PersistentTasksMetadataSection.TYPE) ); assertTrue(builder.hasTask(taskId)); return ClusterState.builder(state) - .metadata(Metadata.builder(state.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, builder.removeTask(taskId).build())) + .metadata( + Metadata.builder(state.metadata()).putSection(PersistentTasksMetadataSection.TYPE, builder.removeTask(taskId).build()) + ) .build(); } diff --git a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java index 
6406ee640380b..4367fac083e14 100644 --- a/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java +++ b/server/src/test/java/org/elasticsearch/persistent/TestPersistentTasksPlugin.java @@ -36,8 +36,8 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.settings.SettingsModule; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.plugins.ActionPlugin; import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.Plugin; diff --git a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java index 62443d6accb41..a5b4f3ed647ab 100644 --- a/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java +++ b/server/src/test/java/org/elasticsearch/readiness/ReadinessServiceTests.java @@ -238,7 +238,7 @@ public void testStatusChange() throws Exception { ClusterState nodeShuttingDownState = ClusterState.builder(completeState) .metadata( Metadata.builder(completeState.metadata()) - .putCustom( + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Collections.singletonMap( diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java index 83cb189415f7e..4f0b2177e5aa2 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoriesServiceTests.java @@ -379,7 +379,7 @@ public void testRegisterRepositorySuccessAfterCreationFailed() { private ClusterState createClusterStateWithRepo(String repoName, String repoType) { ClusterState.Builder state = ClusterState.builder(new ClusterName("test")); Metadata.Builder mdBuilder = Metadata.builder(); - mdBuilder.putCustom( + mdBuilder.putSection( RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(new RepositoryMetadata(repoName, repoType, Settings.EMPTY))) ); diff --git a/server/src/test/java/org/elasticsearch/repositories/ResolvedRepositoriesTests.java b/server/src/test/java/org/elasticsearch/repositories/ResolvedRepositoriesTests.java index 04859d2847522..55201c5c1dfa7 100644 --- a/server/src/test/java/org/elasticsearch/repositories/ResolvedRepositoriesTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/ResolvedRepositoriesTests.java @@ -86,7 +86,7 @@ private static ClusterState clusterStateWithRepositories(String... 
repoNames) {
         repositories.add(new RepositoryMetadata(repoName, "test", Settings.EMPTY));
     }
     return ClusterState.EMPTY_STATE.copyAndUpdateMetadata(
-        b -> b.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositories))
+        b -> b.putSection(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositories))
     );
 }
diff --git a/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java b/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
index 612039685821a..66fa1f8b1fb66 100644
--- a/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/script/ScriptServiceTests.java
@@ -448,7 +448,7 @@ public void testGetStoredScript() throws Exception {
         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
             .metadata(
                 Metadata.builder()
-                    .putCustom(
+                    .putSection(
                         ScriptMetadata.TYPE,
                         new ScriptMetadata.Builder(null).storeScript("_id", StoredScriptSource.parse(new BytesArray("""
                             {"script": {"lang": "_lang", "source": "abc"} }"""), XContentType.JSON)).build()
diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetadataSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetadataSerializationTests.java
index 2feb9a6c65825..1ee75c3b75db1 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetadataSerializationTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/RepositoriesMetadataSerializationTests.java
@@ -10,7 +10,7 @@
 import org.elasticsearch.cluster.ClusterModule;
 import org.elasticsearch.cluster.Diff;
-import org.elasticsearch.cluster.metadata.Metadata.Custom;
+import org.elasticsearch.cluster.metadata.MetadataSection;
 import org.elasticsearch.cluster.metadata.RepositoriesMetadata;
 import org.elasticsearch.cluster.metadata.RepositoryMetadata;
 import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
@@ -24,10 +24,10 @@
 import java.util.Comparator;
 import java.util.List;
 
-public class RepositoriesMetadataSerializationTests extends ChunkedToXContentDiffableSerializationTestCase<Custom> {
+public class RepositoriesMetadataSerializationTests extends ChunkedToXContentDiffableSerializationTestCase<MetadataSection> {
 
     @Override
-    protected Custom createTestInstance() {
+    protected MetadataSection createTestInstance() {
         int numberOfRepositories = randomInt(10);
         List<RepositoryMetadata> entries = new ArrayList<>();
         for (int i = 0; i < numberOfRepositories; i++) {
@@ -49,12 +49,12 @@ protected Custom createTestInstance() {
     }
 
     @Override
-    protected Writeable.Reader<Custom> instanceReader() {
+    protected Writeable.Reader<MetadataSection> instanceReader() {
         return RepositoriesMetadata::new;
     }
 
     @Override
-    protected Custom mutateInstance(Custom instance) {
+    protected MetadataSection mutateInstance(MetadataSection instance) {
         List<RepositoryMetadata> entries = new ArrayList<>(((RepositoriesMetadata) instance).repositories());
         boolean addEntry = entries.isEmpty() ? true : randomBoolean();
         if (addEntry) {
@@ -79,7 +79,7 @@ public Settings randomSettings() {
     }
 
     @Override
-    protected Custom makeTestChanges(Custom testInstance) {
+    protected MetadataSection makeTestChanges(MetadataSection testInstance) {
         RepositoriesMetadata repositoriesMetadata = (RepositoriesMetadata) testInstance;
         List<RepositoryMetadata> repos = new ArrayList<>(repositoriesMetadata.repositories());
         if (randomBoolean() && repos.size() > 1) {
@@ -98,7 +98,7 @@ protected Custom makeTestChanges(Custom testInstance) {
     }
 
     @Override
-    protected Writeable.Reader<Diff<Custom>> diffReader() {
+    protected Writeable.Reader<Diff<MetadataSection>> diffReader() {
         return RepositoriesMetadata::readDiffFrom;
     }
 
@@ -108,7 +108,7 @@ protected NamedWriteableRegistry getNamedWriteableRegistry() {
     }
 
     @Override
-    protected Custom doParseInstance(XContentParser parser) throws IOException {
+    protected MetadataSection doParseInstance(XContentParser parser) throws IOException {
         assertEquals(XContentParser.Token.START_OBJECT, parser.nextToken());
         RepositoriesMetadata repositoriesMetadata = RepositoriesMetadata.fromXContent(parser);
         assertEquals(XContentParser.Token.END_OBJECT, parser.currentToken());
diff --git a/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java b/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java
index 7744eed90e1cc..c1a518895e1fb 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/RepositoryIntegrityHealthIndicatorServiceTests.java
@@ -386,7 +386,7 @@ private ClusterState createClusterStateWith(RepositoriesMetadata metadata) {
             .nodes(DiscoveryNodes.builder().add(node1).add(node2).build())
             .nodeFeatures(Map.of(node1.getId(), features, node2.getId(), features));
         if (metadata != null) {
-            builder.metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, metadata));
+            builder.metadata(Metadata.builder().putSection(RepositoriesMetadata.TYPE, metadata));
         }
         return builder.build();
     }
diff --git a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java
index 873a35aa49107..ea548864a5d6e 100644
--- a/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java
+++ b/server/src/test/java/org/elasticsearch/snapshots/SnapshotsInProgressSerializationTests.java
@@ -77,7 +77,7 @@ protected Custom createTestInstance() {
 
     private ClusterState getClusterStateWithNodeShutdownMetadata(List<String> nodeIdsForRemoval) {
         return CLUSTER_STATE_FOR_NODE_SHUTDOWNS.copyAndUpdateMetadata(
-            mdb -> mdb.putCustom(
+            mdb -> mdb.putSection(
                 NodesShutdownMetadata.TYPE,
                 new NodesShutdownMetadata(
                     nodeIdsForRemoval.stream()
@@ -468,7 +468,7 @@ public void testXContent() throws IOException {
             )
             .withUpdatedNodeIdsForRemoval(
                 CLUSTER_STATE_FOR_NODE_SHUTDOWNS.copyAndUpdateMetadata(
-                    b -> b.putCustom(
+                    b -> b.putSection(
                         NodesShutdownMetadata.TYPE,
                         new NodesShutdownMetadata(
                             Map.of(
diff --git a/server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java b/server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java
index b7fff65a19b64..f23b6e1351a36 100644
--- a/server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java
+++ b/server/src/test/java/org/elasticsearch/upgrades/FeatureMigrationResultsTests.java
@@ -10,7 +10,7 @@
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cluster.Diff;
-import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.metadata.MetadataSection;
 import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.core.Tuple;
 import org.elasticsearch.test.ChunkedToXContentDiffableSerializationTestCase;
@@ -22,7 +22,7 @@
 import java.util.Map;
 import java.util.stream.Collectors;
 
-public class FeatureMigrationResultsTests extends ChunkedToXContentDiffableSerializationTestCase<Metadata.Custom> {
+public class FeatureMigrationResultsTests extends ChunkedToXContentDiffableSerializationTestCase<MetadataSection> {
 
     private static final ConstructingObjectParser<SingleFeatureMigrationResult, Void> SINGLE_FEATURE_RESULT_PARSER =
         new ConstructingObjectParser<>(
@@ -80,7 +80,7 @@ private SingleFeatureMigrationResult randomFeatureStatus() {
     }
 
     @Override
-    protected FeatureMigrationResults mutateInstance(Metadata.Custom instance) {
+    protected FeatureMigrationResults mutateInstance(MetadataSection instance) {
         int oldSize = ((FeatureMigrationResults) instance).getFeatureStatuses().size();
         if (oldSize == 0 || randomBoolean()) {
             return new FeatureMigrationResults(
@@ -101,7 +101,7 @@ protected boolean assertToXContentEquivalence() {
     }
 
     @Override
-    protected Writeable.Reader<Metadata.Custom> instanceReader() {
+    protected Writeable.Reader<MetadataSection> instanceReader() {
         return FeatureMigrationResults::new;
     }
 
@@ -111,12 +111,12 @@ protected FeatureMigrationResults doParseInstance(XContentParser parser) throws
     }
 
     @Override
-    protected Metadata.Custom makeTestChanges(Metadata.Custom testInstance) {
+    protected MetadataSection makeTestChanges(MetadataSection testInstance) {
         return mutateInstance(testInstance);
     }
 
     @Override
-    protected Writeable.Reader<Diff<Metadata.Custom>> diffReader() {
+    protected Writeable.Reader<Diff<MetadataSection>> diffReader() {
         return FeatureMigrationResults.ResultsDiff::new;
     }
 }
diff --git a/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java b/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
index 5b656598451a3..87d0984c7f4fc 100644
--- a/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/action/support/replication/ClusterStateCreationUtils.java
@@ -33,7 +33,7 @@
 import org.elasticsearch.index.shard.IndexLongFieldRange;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.SystemIndices;
-import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.persistent.PersistentTasksMetadataSection;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -579,12 +579,12 @@ private static String selectAndRemove(Set<String> strings) {
     }
 
     private static Metadata.Builder addHealthNode(Metadata.Builder metadataBuilder, DiscoveryNode healthNode) {
-        PersistentTasksCustomMetadata.Builder tasks = PersistentTasksCustomMetadata.builder();
-        PersistentTasksCustomMetadata.Assignment assignment = new PersistentTasksCustomMetadata.Assignment(
+        PersistentTasksMetadataSection.Builder tasks = PersistentTasksMetadataSection.builder();
+        PersistentTasksMetadataSection.Assignment assignment = new PersistentTasksMetadataSection.Assignment(
             healthNode.getId(),
             randomAlphaOfLength(10)
         );
         tasks.addTask(HealthNode.TASK_NAME, HealthNode.TASK_NAME, HealthNodeTaskParams.INSTANCE, assignment);
-        return metadataBuilder.putCustom(PersistentTasksCustomMetadata.TYPE, tasks.build());
+        return metadataBuilder.putSection(PersistentTasksMetadataSection.TYPE, tasks.build());
     }
 }
diff --git a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java
index d31bd16b07fcc..d5c407c047cf2 100644
--- a/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java
+++ b/test/framework/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreTestUtil.java
@@ -402,7 +402,7 @@ public static ClusterService mockClusterService(RepositoryMetadata metadata) {
                 .metadata(
                     Metadata.builder()
                         .clusterUUID(UUIDs.randomBase64UUID(random()))
-                        .putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(metadata)))
+                        .putSection(RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(metadata)))
                         .build()
                 )
                 .build()
diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
index 1b49209b49c7f..690db83d1ce20 100644
--- a/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
@@ -511,7 +511,7 @@ protected void assertDocCount(String index, long count) {
      */
     protected void addBwCFailedSnapshot(String repoName, String snapshotName, Map<String, Object> metadata) throws Exception {
         final ClusterState state = clusterAdmin().prepareState().get().getState();
-        final RepositoriesMetadata repositoriesMetadata = state.metadata().custom(RepositoriesMetadata.TYPE);
+        final RepositoriesMetadata repositoriesMetadata = state.metadata().section(RepositoriesMetadata.TYPE);
         assertNotNull(repositoriesMetadata);
         final RepositoryMetadata initialRepoMetadata = repositoriesMetadata.repository(repoName);
         assertNotNull(initialRepoMetadata);
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
index aad3dcc457241..3a191110ef94a 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java
@@ -131,7 +131,7 @@
 import org.elasticsearch.indices.store.IndicesStore;
 import org.elasticsearch.monitor.jvm.HotThreads;
 import org.elasticsearch.node.NodeMocksPlugin;
-import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.persistent.PersistentTasksMetadataSection;
 import org.elasticsearch.plugins.NetworkPlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.rest.RestStatus;
@@ -1174,15 +1174,15 @@ public static SearchRequestBuilder prepareSearch(String... indices) {
     /**
      * Retrieves the persistent tasks with the requested task name from the given cluster state.
      */
-    public static List<PersistentTasksCustomMetadata.PersistentTask<?>> findTasks(ClusterState clusterState, String taskName) {
+    public static List<PersistentTasksMetadataSection.PersistentTask<?>> findTasks(ClusterState clusterState, String taskName) {
         return findTasks(clusterState, Set.of(taskName));
     }
 
     /**
      * Retrieves the persistent tasks with the requested task names from the given cluster state.
      */
-    public static List<PersistentTasksCustomMetadata.PersistentTask<?>> findTasks(ClusterState clusterState, Set<String> taskNames) {
-        PersistentTasksCustomMetadata tasks = clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE);
+    public static List<PersistentTasksMetadataSection.PersistentTask<?>> findTasks(ClusterState clusterState, Set<String> taskNames) {
+        PersistentTasksMetadataSection tasks = clusterState.metadata().section(PersistentTasksMetadataSection.TYPE);
         if (tasks == null) {
             return List.of();
         }
diff --git a/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetadata.java b/test/framework/src/main/java/org/elasticsearch/test/TestMetadataSection.java
similarity index 83%
rename from test/framework/src/main/java/org/elasticsearch/test/TestCustomMetadata.java
rename to test/framework/src/main/java/org/elasticsearch/test/TestMetadataSection.java
index 5a63bc1353931..3690bf279ae78 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/TestCustomMetadata.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/TestMetadataSection.java
@@ -11,7 +11,7 @@
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.cluster.AbstractNamedDiffable;
 import org.elasticsearch.cluster.NamedDiff;
-import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.metadata.MetadataSection;
 import org.elasticsearch.common.collect.Iterators;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -22,10 +22,10 @@
 import java.util.Iterator;
 import java.util.function.Function;
 
-public abstract class TestCustomMetadata extends AbstractNamedDiffable<Metadata.Custom> implements Metadata.Custom {
+public abstract class TestMetadataSection extends AbstractNamedDiffable<MetadataSection> implements MetadataSection {
     private final String data;
 
-    protected TestCustomMetadata(String data) {
+    protected TestMetadataSection(String data) {
         this.data = data;
     }
@@ -40,7 +40,7 @@ public boolean equals(Object o) {
             return false;
         }
 
-        TestCustomMetadata that = (TestCustomMetadata) o;
+        TestMetadataSection that = (TestMetadataSection) o;
 
         if (data.equals(that.data) == false) {
             return false;
@@ -54,12 +54,12 @@ public int hashCode() {
         return data.hashCode();
     }
 
-    public static <T extends TestCustomMetadata> T readFrom(Function<String, T> supplier, StreamInput in) throws IOException {
+    public static <T extends TestMetadataSection> T readFrom(Function<String, T> supplier, StreamInput in) throws IOException {
         return supplier.apply(in.readString());
     }
 
-    public static NamedDiff<Metadata.Custom> readDiffFrom(String name, StreamInput in) throws IOException {
-        return readDiffFrom(Metadata.Custom.class, name, in);
+    public static NamedDiff<MetadataSection> readDiffFrom(String name, StreamInput in) throws IOException {
+        return readDiffFrom(MetadataSection.class, name, in);
     }
 
     @Override
@@ -68,7 +68,7 @@ public void writeTo(StreamOutput out) throws IOException {
     }
 
     @SuppressWarnings("unchecked")
-    public static <T extends TestCustomMetadata> T fromXContent(Function<String, T> supplier, XContentParser parser) throws IOException {
+    public static <T extends TestMetadataSection> T fromXContent(Function<String, T> supplier, XContentParser parser) throws IOException {
         XContentParser.Token token;
         String data = null;
         while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
diff --git a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java
index 1d6faa0f403d4..da4497affbba5 100644
--- a/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java
+++
b/x-pack/plugin/apm-data/src/test/java/org/elasticsearch/xpack/apmdata/APMIndexTemplateRegistryTests.java @@ -507,8 +507,8 @@ private ClusterState createClusterState( .componentTemplates(componentTemplates) .indexTemplates(composableTemplates) .transientSettings(nodeSettings) - .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) - .putCustom(IngestMetadata.TYPE, ingestMetadata) + .putSection(IndexLifecycleMetadata.TYPE, ilmMeta) + .putSection(IngestMetadata.TYPE, ingestMetadata) .build() ) .blocks(new ClusterBlocks.Builder().build()) diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java index 8d2a60773d29d..078ee69a5f712 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionIT.java @@ -42,7 +42,7 @@ public void testDeletePolicy() { assertAcked(client().execute(DeleteAutoscalingPolicyAction.INSTANCE, deleteRequest).actionGet()); // now verify that the policy is not in the cluster state final ClusterState state = clusterAdmin().prepareState().get().getState(); - final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata metadata = state.metadata().section(AutoscalingMetadata.NAME); assertNotNull(metadata); assertThat(metadata.policies(), not(hasKey(policy.name()))); // and verify that we can not obtain the policy via get diff --git a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionIT.java b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionIT.java index 1a49211601c4b..0c8cbd07c4c89 100644 --- a/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionIT.java +++ b/x-pack/plugin/autoscaling/src/internalClusterTest/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionIT.java @@ -33,7 +33,7 @@ public class TransportPutAutoscalingPolicyActionIT extends AutoscalingIntegTestC public void testAddPolicy() { final AutoscalingPolicy policy = putRandomAutoscalingPolicy(); final ClusterState state = clusterAdmin().prepareState().get().getState(); - final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata metadata = state.metadata().section(AutoscalingMetadata.NAME); assertNotNull(metadata); assertThat(metadata.policies(), hasKey(policy.name())); assertThat(metadata.policies().get(policy.name()).policy(), equalTo(policy)); @@ -48,7 +48,7 @@ public void testUpdatePolicy() { ); putAutoscalingPolicy(updatedPolicy); final ClusterState state = clusterAdmin().prepareState().get().getState(); - final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata metadata = state.metadata().section(AutoscalingMetadata.NAME); assertNotNull(metadata); assertThat(metadata.policies(), hasKey(policy.name())); assertThat(metadata.policies().get(policy.name()).policy(), equalTo(updatedPolicy)); @@ -60,8 +60,8 
@@ public void testNoOpPolicy() { putAutoscalingPolicy(policy); final ClusterState afterState = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName()).state(); assertThat( - beforeState.metadata().custom(AutoscalingMetadata.NAME), - sameInstance(afterState.metadata().custom(AutoscalingMetadata.NAME)) + beforeState.metadata().section(AutoscalingMetadata.NAME), + sameInstance(afterState.metadata().section(AutoscalingMetadata.NAME)) ); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java index 88bd978b6f416..beaaa00701f85 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/Autoscaling.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.service.ClusterService; @@ -152,7 +152,7 @@ public List getRestHandlers( @Override public List getNamedWriteables() { return List.of( - new NamedWriteableRegistry.Entry(Metadata.Custom.class, AutoscalingMetadata.NAME, AutoscalingMetadata::new), + new NamedWriteableRegistry.Entry(MetadataSection.class, AutoscalingMetadata.NAME, AutoscalingMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, AutoscalingMetadata.NAME, AutoscalingMetadata.AutoscalingMetadataDiff::new), new NamedWriteableRegistry.Entry( AutoscalingDeciderResult.Reason.class, @@ -190,7 +190,7 @@ public List getNamedWriteables() { @Override public List getNamedXContent() { return List.of( - new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(AutoscalingMetadata.NAME), AutoscalingMetadata::parse) + new NamedXContentRegistry.Entry(MetadataSection.class, new ParseField(AutoscalingMetadata.NAME), AutoscalingMetadata::parse) ); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java index 5c885ad718d8c..231d9e0c65d3e 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadata.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; @@ -35,7 +36,7 @@ import java.util.function.Function; import java.util.stream.Collectors; -public class AutoscalingMetadata implements Metadata.Custom { +public class AutoscalingMetadata implements MetadataSection { public static final String NAME = "autoscaling"; @@ -103,7 +104,7 @@ public boolean isRestorable() { } @Override - public Diff diff(final Metadata.Custom previousState) { + 
public Diff diff(final MetadataSection previousState) { return new AutoscalingMetadataDiff((AutoscalingMetadata) previousState, this); } @@ -135,7 +136,7 @@ public int hashCode() { return Objects.hash(policies); } - public static class AutoscalingMetadataDiff implements NamedDiff { + public static class AutoscalingMetadataDiff implements NamedDiff { final Diff> policies; @@ -153,7 +154,7 @@ public AutoscalingMetadataDiff(final StreamInput in) throws IOException { } @Override - public Metadata.Custom apply(final Metadata.Custom part) { + public MetadataSection apply(final MetadataSection part) { return new AutoscalingMetadata(new TreeMap<>(policies.apply(((AutoscalingMetadata) part).policies))); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyAction.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyAction.java index 70a5da97aa32b..712de94762bc7 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyAction.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyAction.java @@ -98,8 +98,8 @@ static ClusterState deleteAutoscalingPolicy(final ClusterState currentState, fin static ClusterState deleteAutoscalingPolicy(final ClusterState currentState, final String name, final Logger logger) { final ClusterState.Builder builder = ClusterState.builder(currentState); final AutoscalingMetadata currentMetadata; - if (currentState.metadata().custom(AutoscalingMetadata.NAME) != null) { - currentMetadata = currentState.metadata().custom(AutoscalingMetadata.NAME); + if (currentState.metadata().section(AutoscalingMetadata.NAME) != null) { + currentMetadata = currentState.metadata().section(AutoscalingMetadata.NAME); } else { // we will reject the request below when we try to look up the policy by name currentMetadata = AutoscalingMetadata.EMPTY; @@ -119,7 +119,7 @@ static ClusterState deleteAutoscalingPolicy(final ClusterState currentState, fin logger.info("deleting autoscaling policy [{}]", name); } final AutoscalingMetadata newMetadata = new AutoscalingMetadata(newPolicies); - builder.metadata(Metadata.builder(currentState.getMetadata()).putCustom(AutoscalingMetadata.NAME, newMetadata).build()); + builder.metadata(Metadata.builder(currentState.getMetadata()).putSection(AutoscalingMetadata.NAME, newMetadata).build()); return builder.build(); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyAction.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyAction.java index 1e8a5d36d690b..6c75eb989a862 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyAction.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyAction.java @@ -74,8 +74,8 @@ protected void masterOperation( static AutoscalingPolicy getAutoscalingPolicy(final ClusterState state, final String name) { final AutoscalingMetadata metadata; - if (state.metadata().custom(AutoscalingMetadata.NAME) != null) { - metadata = state.metadata().custom(AutoscalingMetadata.NAME); + if (state.metadata().section(AutoscalingMetadata.NAME) != null) { + metadata = 
state.metadata().section(AutoscalingMetadata.NAME); } else { // we will reject the request below when we try to look up the policy by name metadata = AutoscalingMetadata.EMPTY; diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyAction.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyAction.java index b07eabf2ed0c3..789bf8732e782 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyAction.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyAction.java @@ -143,8 +143,8 @@ static ClusterState putAutoscalingPolicy( // AutoscalingCalculateCapacityService#hasUnknownRoles where we shortcut decision making if master node does not know all roles. final ClusterState.Builder builder = ClusterState.builder(currentState); final AutoscalingMetadata currentMetadata; - if (currentState.metadata().custom(AutoscalingMetadata.NAME) != null) { - currentMetadata = currentState.metadata().custom(AutoscalingMetadata.NAME); + if (currentState.metadata().section(AutoscalingMetadata.NAME) != null) { + currentMetadata = currentState.metadata().section(AutoscalingMetadata.NAME); } else { currentMetadata = AutoscalingMetadata.EMPTY; } @@ -184,7 +184,7 @@ static ClusterState putAutoscalingPolicy( logger.info("updating autoscaling policy [{}]", request.name()); } final AutoscalingMetadata newMetadata = new AutoscalingMetadata(newPolicies); - builder.metadata(Metadata.builder(currentState.getMetadata()).putCustom(AutoscalingMetadata.NAME, newMetadata).build()); + builder.metadata(Metadata.builder(currentState.getMetadata()).putSection(AutoscalingMetadata.NAME, newMetadata).build()); return builder.build(); } diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java index d88fa19b18f49..6537e9c70a48f 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityService.java @@ -112,7 +112,7 @@ public SortedMap calculate( AutoscalingNodesInfo autoscalingNodesInfo, Runnable ensureNotCancelled ) { - AutoscalingMetadata autoscalingMetadata = state.metadata().custom(AutoscalingMetadata.NAME); + AutoscalingMetadata autoscalingMetadata = state.metadata().section(AutoscalingMetadata.NAME); if (autoscalingMetadata != null) { return new TreeMap<>( autoscalingMetadata.policies() diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java index 7490c7a73aecb..9900cd504cde8 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodeInfoService.java @@ -195,7 +195,7 @@ private void sendToMissingNodes(Function nodeLookup, Set< } private static Set> 
calculateAutoscalingRoleSets(ClusterState state) { - AutoscalingMetadata autoscalingMetadata = state.metadata().custom(AutoscalingMetadata.NAME); + AutoscalingMetadata autoscalingMetadata = state.metadata().section(AutoscalingMetadata.NAME); if (autoscalingMetadata != null) { return autoscalingMetadata.policies() .values() diff --git a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java index 2f8cccdc303e6..266e46decf63e 100644 --- a/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java +++ b/x-pack/plugin/autoscaling/src/main/java/org/elasticsearch/xpack/autoscaling/storage/ReactiveStorageDeciderService.java @@ -723,7 +723,7 @@ public AllocationState forecast(long forecastWindow, long now) { return this; } // for now we only look at data-streams. We might want to also detect alias based time-based indices. - DataStreamMetadata dataStreamMetadata = state.metadata().custom(DataStreamMetadata.TYPE); + DataStreamMetadata dataStreamMetadata = state.metadata().section(DataStreamMetadata.TYPE); if (dataStreamMetadata == null) { return this; } diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadataDiffableSerializationTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadataDiffableSerializationTests.java index 1cecd7faf1dfb..81a0618cd8a20 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadataDiffableSerializationTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/AutoscalingMetadataDiffableSerializationTests.java @@ -8,7 +8,7 @@ package org.elasticsearch.xpack.autoscaling; import org.elasticsearch.cluster.Diff; -import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.ChunkedToXContentDiffableSerializationTestCase; @@ -24,7 +24,7 @@ import static org.elasticsearch.xpack.autoscaling.AutoscalingTestCase.randomAutoscalingMetadata; import static org.elasticsearch.xpack.autoscaling.AutoscalingTestCase.randomAutoscalingPolicy; -public class AutoscalingMetadataDiffableSerializationTests extends ChunkedToXContentDiffableSerializationTestCase { +public class AutoscalingMetadataDiffableSerializationTests extends ChunkedToXContentDiffableSerializationTestCase { @Override protected NamedWriteableRegistry getNamedWriteableRegistry() { @@ -42,7 +42,7 @@ protected AutoscalingMetadata doParseInstance(final XContentParser parser) { } @Override - protected Writeable.Reader instanceReader() { + protected Writeable.Reader instanceReader() { return AutoscalingMetadata::new; } @@ -52,12 +52,12 @@ protected AutoscalingMetadata createTestInstance() { } @Override - protected Metadata.Custom makeTestChanges(final Metadata.Custom testInstance) { + protected MetadataSection makeTestChanges(final MetadataSection testInstance) { return mutateInstance(testInstance); } @Override - protected Metadata.Custom mutateInstance(final Metadata.Custom instance) { + protected MetadataSection mutateInstance(final MetadataSection instance) { final AutoscalingMetadata metadata = 
(AutoscalingMetadata) instance; final SortedMap policies = new TreeMap<>(metadata.policies()); if (policies.size() == 0 || randomBoolean()) { @@ -74,7 +74,7 @@ protected Metadata.Custom mutateInstance(final Metadata.Custom instance) { } @Override - protected Writeable.Reader> diffReader() { + protected Writeable.Reader> diffReader() { return AutoscalingMetadata.AutoscalingMetadataDiff::new; } } diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/ReservedAutoscalingPolicyTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/ReservedAutoscalingPolicyTests.java index 4f8cec0ee845d..d9a9369f0aeaa 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/ReservedAutoscalingPolicyTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/ReservedAutoscalingPolicyTests.java @@ -102,7 +102,7 @@ public void testAddRemoveRoleMapping() throws Exception { prevState = updatedState; updatedState = processJSON(action, prevState, json); assertThat(updatedState.keys(), containsInAnyOrder("my_autoscaling_policy", "my_autoscaling_policy_1")); - AutoscalingMetadata autoMetadata = updatedState.state().metadata().custom(AutoscalingMetadata.NAME); + AutoscalingMetadata autoMetadata = updatedState.state().metadata().section(AutoscalingMetadata.NAME); assertThat(autoMetadata.policies().keySet(), containsInAnyOrder("my_autoscaling_policy", "my_autoscaling_policy_1")); assertThat(autoMetadata.policies().get("my_autoscaling_policy").policy().roles(), containsInAnyOrder("data_hot")); assertThat(autoMetadata.policies().get("my_autoscaling_policy").policy().deciders().keySet(), containsInAnyOrder("fixed")); diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionTests.java index 4e6514face33f..13e8cff4c2d88 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportDeleteAutoscalingPolicyActionTests.java @@ -90,17 +90,17 @@ public void testDeletePolicy() { { final ClusterState.Builder builder = ClusterState.builder(new ClusterName(randomAlphaOfLength(8))); builder.metadata( - Metadata.builder().putCustom(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) + Metadata.builder().putSection(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) ); currentState = builder.build(); } - final AutoscalingMetadata currentMetadata = currentState.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata currentMetadata = currentState.metadata().section(AutoscalingMetadata.NAME); final String name = randomFrom(currentMetadata.policies().keySet()); final Logger mockLogger = mock(Logger.class); final ClusterState state = TransportDeleteAutoscalingPolicyAction.deleteAutoscalingPolicy(currentState, name, mockLogger); // ensure the policy is deleted from the cluster state - final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata metadata = state.metadata().section(AutoscalingMetadata.NAME); assertNotNull(metadata); 
assertThat(metadata.policies(), not(hasKey(name))); verify(mockLogger).info("deleting autoscaling policy [{}]", name); @@ -121,18 +121,18 @@ public void testDeletePolicyByWildcard() { { final ClusterState.Builder builder = ClusterState.builder(new ClusterName(randomAlphaOfLength(8))); builder.metadata( - Metadata.builder().putCustom(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) + Metadata.builder().putSection(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) ); currentState = builder.build(); } - final AutoscalingMetadata currentMetadata = currentState.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata currentMetadata = currentState.metadata().section(AutoscalingMetadata.NAME); final String policyName = randomFrom(currentMetadata.policies().keySet()); final String deleteName = randomFrom(policyName.substring(0, between(0, policyName.length()))) + "*"; final Logger mockLogger = mock(Logger.class); final ClusterState state = TransportDeleteAutoscalingPolicyAction.deleteAutoscalingPolicy(currentState, deleteName, mockLogger); // ensure the policy is deleted from the cluster state - final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata metadata = state.metadata().section(AutoscalingMetadata.NAME); assertNotNull(metadata); assertThat(metadata.policies(), not(hasKey(policyName))); @@ -155,11 +155,11 @@ public void testDeleteNonExistentPolicy() { { final ClusterState.Builder builder = ClusterState.builder(new ClusterName(randomAlphaOfLength(8))); builder.metadata( - Metadata.builder().putCustom(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) + Metadata.builder().putSection(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) ); currentState = builder.build(); } - final AutoscalingMetadata currentMetadata = currentState.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata currentMetadata = currentState.metadata().section(AutoscalingMetadata.NAME); final String name = randomValueOtherThanMany(currentMetadata.policies().keySet()::contains, () -> randomAlphaOfLength(8)); final Logger mockLogger = mock(Logger.class); final ResourceNotFoundException e = expectThrows( diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionTests.java index a16b224d25894..0bc127e42098d 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportGetAutoscalingPolicyActionTests.java @@ -94,11 +94,11 @@ public void testGetPolicy() { { final ClusterState.Builder builder = ClusterState.builder(new ClusterName(randomAlphaOfLength(8))); builder.metadata( - Metadata.builder().putCustom(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) + Metadata.builder().putSection(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) ); state = builder.build(); } - final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata metadata = 
state.metadata().section(AutoscalingMetadata.NAME); final String name = randomFrom(metadata.policies().keySet()); final AutoscalingPolicy policy = TransportGetAutoscalingPolicyAction.getAutoscalingPolicy(state, name); @@ -110,11 +110,11 @@ public void testGetNonExistentPolicy() { { final ClusterState.Builder builder = ClusterState.builder(new ClusterName(randomAlphaOfLength(8))); builder.metadata( - Metadata.builder().putCustom(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) + Metadata.builder().putSection(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) ); state = builder.build(); } - final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata metadata = state.metadata().section(AutoscalingMetadata.NAME); final String name = randomValueOtherThanMany(metadata.policies().keySet()::contains, () -> randomAlphaOfLength(8)); final ResourceNotFoundException e = expectThrows( ResourceNotFoundException.class, diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionTests.java index d67978b727ca1..e49ec84bf5766 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/action/TransportPutAutoscalingPolicyActionTests.java @@ -90,7 +90,7 @@ public void testAddPolicy() { { final ClusterState.Builder builder = ClusterState.builder(new ClusterName(randomAlphaOfLength(8))); if (randomBoolean()) { - builder.metadata(Metadata.builder().putCustom(AutoscalingMetadata.NAME, randomAutoscalingMetadata())); + builder.metadata(Metadata.builder().putSection(AutoscalingMetadata.NAME, randomAutoscalingMetadata())); } currentState = builder.build(); } @@ -105,7 +105,7 @@ public void testAddPolicy() { ); // ensure the new policy is in the updated cluster state - final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata metadata = state.metadata().section(AutoscalingMetadata.NAME); assertNotNull(metadata); assertThat(metadata.policies(), hasKey(request.name())); assertThat(metadata.policies().get(request.name()).policy().roles(), equalTo(request.roles())); @@ -118,7 +118,7 @@ public void testAddPolicy() { verifyNoMoreInteractions(mockLogger); // ensure that existing policies were preserved - final AutoscalingMetadata currentMetadata = currentState.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata currentMetadata = currentState.metadata().section(AutoscalingMetadata.NAME); if (currentMetadata != null) { for (final Map.Entry entry : currentMetadata.policies().entrySet()) { assertThat(metadata.policies(), hasKey(entry.getKey())); @@ -152,11 +152,11 @@ public void testUpdatePolicy() { { final ClusterState.Builder builder = ClusterState.builder(new ClusterName(randomAlphaOfLength(8))); builder.metadata( - Metadata.builder().putCustom(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) + Metadata.builder().putSection(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) ); currentState = builder.build(); } - final AutoscalingMetadata currentMetadata = 
currentState.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata currentMetadata = currentState.metadata().section(AutoscalingMetadata.NAME); final String name = randomFrom(currentMetadata.policies().keySet()); // add to the existing deciders, to ensure the policy has changed final PutAutoscalingPolicyAction.Request request = new PutAutoscalingPolicyAction.Request( @@ -180,7 +180,7 @@ public void testUpdatePolicy() { ); // ensure the updated policy is in the updated cluster state - final AutoscalingMetadata metadata = state.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata metadata = state.metadata().section(AutoscalingMetadata.NAME); assertNotNull(metadata); assertThat(metadata.policies(), hasKey(request.name())); assertThat(metadata.policies().get(request.name()).policy(), equalTo(expectedPolicy)); @@ -202,12 +202,12 @@ public void testNoOpUpdatePolicy() { { final ClusterState.Builder builder = ClusterState.builder(new ClusterName(randomAlphaOfLength(8))); builder.metadata( - Metadata.builder().putCustom(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) + Metadata.builder().putSection(AutoscalingMetadata.NAME, randomAutoscalingMetadataOfPolicyCount(randomIntBetween(1, 8))) ); currentState = builder.build(); } // randomly put an existing policy - final AutoscalingMetadata currentMetadata = currentState.metadata().custom(AutoscalingMetadata.NAME); + final AutoscalingMetadata currentMetadata = currentState.metadata().section(AutoscalingMetadata.NAME); final AutoscalingPolicy policy = randomFrom(currentMetadata.policies().values()).policy(); final PutAutoscalingPolicyAction.Request request = new PutAutoscalingPolicyAction.Request( TEST_REQUEST_TIMEOUT, diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityServiceTests.java index 4061d37832184..4c9574c0ca333 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityServiceTests.java +++ b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/AutoscalingCalculateCapacityServiceTests.java @@ -62,7 +62,7 @@ public void testMultiplePoliciesFixedCapacity() { .collect(Collectors.toMap(Tuple::v1, Tuple::v2)) ); ClusterState state = ClusterState.builder(ClusterName.DEFAULT) - .metadata(Metadata.builder().putCustom(AutoscalingMetadata.NAME, new AutoscalingMetadata(policies))) + .metadata(Metadata.builder().putSection(AutoscalingMetadata.NAME, new AutoscalingMetadata(policies))) .build(); SortedMap resultsMap = service.calculate( state, @@ -121,7 +121,7 @@ public String name() { ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoscalingMetadata.NAME, new AutoscalingMetadata( new TreeMap<>( diff --git a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java index 9658db911f6df..5e5cea2af9162 100644 --- a/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java +++ 
b/x-pack/plugin/autoscaling/src/test/java/org/elasticsearch/xpack/autoscaling/capacity/nodeinfo/AutoscalingNodesInfoServiceTests.java @@ -105,7 +105,7 @@ public void setUp() throws Exception { when(clusterService.getClusterSettings()).thenReturn(clusterSettings); service = new AutoscalingNodeInfoService(clusterService, client); autoscalingMetadata = randomAutoscalingMetadataOfPolicyCount(between(1, 8)); - metadata = Metadata.builder().putCustom(AutoscalingMetadata.NAME, autoscalingMetadata).build(); + metadata = Metadata.builder().putSection(AutoscalingMetadata.NAME, autoscalingMetadata).build(); } @After diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java index 2d77d2c770845..b25a4f453ff9a 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/AutoFollowIT.java @@ -161,7 +161,7 @@ public void testCleanFollowedLeaderIndexUUIDs() throws Exception { String leaderIndexUUID = metadata.index("copy-logs-201901") .getCustomData(Ccr.CCR_CUSTOM_METADATA_KEY) .get(Ccr.CCR_CUSTOM_METADATA_LEADER_INDEX_UUID_KEY); - AutoFollowMetadata autoFollowMetadata = metadata.custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadata = metadata.section(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata, notNullValue()); List followedLeaderIndixUUIDs = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"); assertThat(followedLeaderIndixUUIDs.size(), equalTo(1)); @@ -175,7 +175,7 @@ public void testCleanFollowedLeaderIndexUUIDs() throws Exception { AutoFollowMetadata autoFollowMetadata = getFollowerCluster().clusterService() .state() .metadata() - .custom(AutoFollowMetadata.TYPE); + .section(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata, notNullValue()); List followedLeaderIndixUUIDs = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"); assertThat(followedLeaderIndixUUIDs.size(), equalTo(0)); @@ -199,7 +199,7 @@ public void testAutoFollowManyIndices() throws Exception { autoFollowStats[0] = getAutoFollowStats(); assertThat(metadata[0].indices().size(), equalTo((int) expectedVal1)); - AutoFollowMetadata autoFollowMetadata = metadata[0].custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadata = metadata[0].section(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), hasSize((int) expectedVal1)); assertThat(autoFollowStats[0].getNumberOfSuccessfulFollowIndices(), equalTo(expectedVal1)); }); @@ -218,7 +218,7 @@ public void testAutoFollowManyIndices() throws Exception { autoFollowStats[0] = getAutoFollowStats(); assertThat(metadata[0].indices().size(), equalTo((int) expectedVal1)); - AutoFollowMetadata autoFollowMetadata = metadata[0].custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadata = metadata[0].section(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), nullValue()); assertThat(autoFollowStats[0].getAutoFollowedClusters().size(), equalTo(0)); }); @@ -242,7 +242,7 @@ public void testAutoFollowManyIndices() throws Exception { autoFollowStats[0] = getAutoFollowStats(); assertThat(metadata[0].indices().size(), equalTo((int) expectedVal2)); - AutoFollowMetadata autoFollowMetadata = metadata[0].custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata 
autoFollowMetadata = metadata[0].section(AutoFollowMetadata.TYPE); // expectedVal2 + 1, because logs-does-not-count is also marked as auto followed. // (This is because indices created before a pattern exists are not auto followed and are just marked as such.) assertThat(autoFollowMetadata.getFollowedLeaderIndexUUIDs().get("my-pattern"), hasSize((int) expectedVal2 + 1)); diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrAliasesIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrAliasesIT.java index e0aa7efa75126..0299d04aced8c 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrAliasesIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrAliasesIT.java @@ -20,7 +20,7 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.action.admin.indices.AliasesNotFoundException; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.xcontent.XContentBuilder; @@ -397,7 +397,7 @@ private AliasMetadata getAliasMetadata(final GetAliasesResponse response, final private CheckedRunnable assertShardFollowTask(final int numberOfPrimaryShards) { return () -> { final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); - final PersistentTasksCustomMetadata taskMetadata = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + final PersistentTasksMetadataSection taskMetadata = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertNotNull("task metadata for follower should exist", taskMetadata); final ListTasksRequest listTasksRequest = new ListTasksRequest(); @@ -409,11 +409,11 @@ private CheckedRunnable assertShardFollowTask(final int numberOfPrima final List taskInfos = listTasksResponse.getTasks(); assertThat("expected a task for each shard", taskInfos.size(), equalTo(numberOfPrimaryShards)); - final Collection> shardFollowTasks = taskMetadata.findTasks( + final Collection> shardFollowTasks = taskMetadata.findTasks( ShardFollowTask.NAME, Objects::nonNull ); - for (final PersistentTasksCustomMetadata.PersistentTask shardFollowTask : shardFollowTasks) { + for (final PersistentTasksMetadataSection.PersistentTask shardFollowTask : shardFollowTasks) { TaskInfo taskInfo = null; final String expectedId = "id=" + shardFollowTask.getId(); for (final TaskInfo info : taskInfos) { diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java index f1febd8aea550..b6fd48041c0b9 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/CcrLicenseIT.java @@ -181,7 +181,7 @@ public ClusterState execute(ClusterState currentState) throws Exception { ClusterState.Builder newState = ClusterState.builder(currentState); newState.metadata( - Metadata.builder(currentState.getMetadata()).putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata).build() + Metadata.builder(currentState.getMetadata()).putSection(AutoFollowMetadata.TYPE, autoFollowMetadata).build() ); return 
newState.build(); } diff --git a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java index 6361b6f89605e..735b9b1edcc36 100644 --- a/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java +++ b/x-pack/plugin/ccr/src/internalClusterTest/java/org/elasticsearch/xpack/ccr/IndexFollowingIT.java @@ -73,7 +73,7 @@ import org.elasticsearch.index.seqno.RetentionLeaseActions; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.IndexClosedException; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.snapshots.SnapshotRestoreException; @@ -1651,7 +1651,7 @@ private BooleanSupplier hasFollowIndexBeenClosed(String indexName) { private CheckedRunnable assertTask(final int numberOfPrimaryShards, final Map numDocsPerShard) { return () -> { final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); - final PersistentTasksCustomMetadata taskMetadata = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + final PersistentTasksMetadataSection taskMetadata = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertNotNull(taskMetadata); ListTasksRequest listTasksRequest = new ListTasksRequest(); @@ -1663,11 +1663,11 @@ private CheckedRunnable assertTask(final int numberOfPrimaryShards, f List taskInfos = listTasksResponse.getTasks(); assertThat(taskInfos.size(), equalTo(numberOfPrimaryShards)); - Collection> shardFollowTasks = taskMetadata.findTasks( + Collection> shardFollowTasks = taskMetadata.findTasks( ShardFollowTask.NAME, Objects::nonNull ); - for (PersistentTasksCustomMetadata.PersistentTask shardFollowTask : shardFollowTasks) { + for (PersistentTasksMetadataSection.PersistentTask shardFollowTask : shardFollowTasks) { final ShardFollowTask shardFollowTaskParams = (ShardFollowTask) shardFollowTask.getParams(); TaskInfo taskInfo = null; String expectedId = "id=" + shardFollowTask.getId(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRUsageTransportAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRUsageTransportAction.java index 9f3b9a3bf16dc..34da03b455201 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRUsageTransportAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/CCRUsageTransportAction.java @@ -68,7 +68,7 @@ protected void masterOperation( } } } - AutoFollowMetadata autoFollowMetadata = metadata.custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadata = metadata.section(AutoFollowMetadata.TYPE); int numberOfAutoFollowPatterns = autoFollowMetadata != null ? 
autoFollowMetadata.getPatterns().size() : 0; Long lastFollowTimeInMillis; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java index 395af34c59b3a..9f4f44f713b9f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/Ccr.java @@ -16,7 +16,7 @@ import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest; import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.cluster.service.ClusterService; @@ -314,7 +314,7 @@ public List getNamedXContent() { return Arrays.asList( // auto-follow metadata, persisted into the cluster state as XContent new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(AutoFollowMetadata.TYPE), AutoFollowMetadata::fromXContent ), diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java index 82af24d2293cc..2cbcad528ff09 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinator.java @@ -257,7 +257,7 @@ synchronized void updateStats(List results) { void updateAutoFollowers(ClusterState followerClusterState) { final AutoFollowMetadata autoFollowMetadata = followerClusterState.getMetadata() - .custom(AutoFollowMetadata.TYPE, AutoFollowMetadata.EMPTY); + .section(AutoFollowMetadata.TYPE, AutoFollowMetadata.EMPTY); if (autoFollowMetadata.getPatterns().isEmpty() && this.autoFollowers.isEmpty()) { return; @@ -465,7 +465,7 @@ void start() { lastAutoFollowTimeInMillis = relativeTimeProvider.getAsLong(); final ClusterState clusterState = followerClusterStateSupplier.get(); - final AutoFollowMetadata autoFollowMetadata = clusterState.metadata().custom(AutoFollowMetadata.TYPE); + final AutoFollowMetadata autoFollowMetadata = clusterState.metadata().section(AutoFollowMetadata.TYPE); if (autoFollowMetadata == null) { LOGGER.info("auto-follower for cluster [{}] has stopped, because there is no autofollow metadata", remoteCluster); return; @@ -877,7 +877,7 @@ static String getFollowerIndexName(AutoFollowPattern autoFollowPattern, String l static Function recordLeaderIndexAsFollowFunction(String name, Index indexToFollow) { return currentState -> { - AutoFollowMetadata currentAutoFollowMetadata = currentState.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata currentAutoFollowMetadata = currentState.metadata().section(AutoFollowMetadata.TYPE); Map> newFollowedIndexUUIDS = new HashMap<>(currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs()); if (newFollowedIndexUUIDS.containsKey(name) == false) { // A delete auto follow pattern request can have removed the auto follow pattern while we want to update @@ -899,7 +899,7 @@ static Function recordLeaderIndexAsFollowFunction(St ); return ClusterState.builder(currentState) .metadata( - Metadata.builder(currentState.getMetadata()).putCustom(AutoFollowMetadata.TYPE, 
newAutoFollowMetadata).build() + Metadata.builder(currentState.getMetadata()).putSection(AutoFollowMetadata.TYPE, newAutoFollowMetadata).build() ) .build(); }; @@ -918,7 +918,7 @@ static Function cleanFollowedRemoteIndices( final List autoFollowPatternNames ) { return currentState -> { - AutoFollowMetadata currentAutoFollowMetadata = currentState.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata currentAutoFollowMetadata = currentState.metadata().section(AutoFollowMetadata.TYPE); Map> autoFollowPatternNameToFollowedIndexUUIDs = new HashMap<>( currentAutoFollowMetadata.getFollowedLeaderIndexUUIDs() ); @@ -956,7 +956,7 @@ static Function cleanFollowedRemoteIndices( ); return ClusterState.builder(currentState) .metadata( - Metadata.builder(currentState.getMetadata()).putCustom(AutoFollowMetadata.TYPE, newAutoFollowMetadata).build() + Metadata.builder(currentState.getMetadata()).putSection(AutoFollowMetadata.TYPE, newAutoFollowMetadata).build() ) .build(); } else { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskCleaner.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskCleaner.java index 7a05a4e712fc4..66dcc1eebfdb0 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskCleaner.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTaskCleaner.java @@ -22,7 +22,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.persistent.CompletionPersistentTaskAction; import org.elasticsearch.persistent.PersistentTaskResponse; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ccr.action.ShardFollowTask; @@ -62,10 +62,10 @@ public void clusterChanged(final ClusterChangedEvent event) { return; } final Metadata metadata = event.state().metadata(); - final PersistentTasksCustomMetadata persistentTasksMetadata = metadata.custom(PersistentTasksCustomMetadata.TYPE); + final PersistentTasksMetadataSection persistentTasksMetadata = metadata.section(PersistentTasksMetadataSection.TYPE); final Metadata previousMetadata = event.previousState().metadata(); if (metadata.indices() == event.previousState().getMetadata().indices() - && persistentTasksMetadata == previousMetadata.custom(PersistentTasksCustomMetadata.TYPE) + && persistentTasksMetadata == previousMetadata.section(PersistentTasksMetadataSection.TYPE) && event.previousState().nodes().isLocalNodeElectedMaster() && event.blocksChanged() == false) { // nothing of relevance changed @@ -75,7 +75,7 @@ public void clusterChanged(final ClusterChangedEvent event) { if (persistentTasksMetadata == null) { return; } - for (PersistentTasksCustomMetadata.PersistentTask persistentTask : persistentTasksMetadata.tasks()) { + for (PersistentTasksMetadataSection.PersistentTask persistentTask : persistentTasksMetadata.tasks()) { if (ShardFollowTask.NAME.equals(persistentTask.getTaskName()) == false) { // this task is not a shard follow task continue; diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java index 585bf2491bfc4..50741da5cd718 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java +++ 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutor.java @@ -56,9 +56,9 @@ import org.elasticsearch.index.translog.Translog; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; @@ -152,7 +152,7 @@ protected AllocatedPersistentTask createTask( String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask taskInProgress, + PersistentTasksMetadataSection.PersistentTask taskInProgress, Map headers ) { ShardFollowTask params = taskInProgress.getParams(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternAction.java index eef4ffa9f2499..46abf791c28da 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternAction.java @@ -79,7 +79,7 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String } static ClusterState innerActivate(final Request request, ClusterState currentState) { - final AutoFollowMetadata autoFollowMetadata = currentState.metadata().custom(AutoFollowMetadata.TYPE); + final AutoFollowMetadata autoFollowMetadata = currentState.metadata().section(AutoFollowMetadata.TYPE); if (autoFollowMetadata == null) { throw new ResourceNotFoundException("auto-follow pattern [{}] is missing", request.getName()); } @@ -118,7 +118,7 @@ static ClusterState innerActivate(final Request request, ClusterState currentSta ); return currentState.copyAndUpdateMetadata( - metadata -> metadata.putCustom( + metadata -> metadata.putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata(newPatterns, autoFollowMetadata.getFollowedLeaderIndexUUIDs(), autoFollowMetadata.getHeaders()) ) diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java index 1cc3510b73117..813fb5e9c59f5 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternAction.java @@ -71,14 +71,14 @@ private void submitUnbatchedTask(@SuppressWarnings("SameParameterValue") String } static ClusterState innerDelete(DeleteAutoFollowPatternAction.Request request, ClusterState currentState) { - AutoFollowMetadata currentAutoFollowMetadata = currentState.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata currentAutoFollowMetadata = currentState.metadata().section(AutoFollowMetadata.TYPE); if (currentAutoFollowMetadata == null || currentAutoFollowMetadata.getPatterns().get(request.getName()) == null) { throw 
new ResourceNotFoundException("auto-follow pattern [{}] is missing", request.getName()); } AutoFollowMetadata newAutoFollowMetadata = removePattern(currentAutoFollowMetadata, request.getName()); - return currentState.copyAndUpdateMetadata(metadata -> metadata.putCustom(AutoFollowMetadata.TYPE, newAutoFollowMetadata)); + return currentState.copyAndUpdateMetadata(metadata -> metadata.putSection(AutoFollowMetadata.TYPE, newAutoFollowMetadata)); } private static AutoFollowMetadata removePattern(AutoFollowMetadata metadata, String name) { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java index 7bc33ce2a74cb..c4c7fe7eb4f89 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Predicates; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -82,7 +82,7 @@ protected ClusterBlockException checkBlock(FollowInfoAction.Request request, Clu static List getFollowInfos(List concreteFollowerIndices, ClusterState state) { List followerInfos = new ArrayList<>(); - PersistentTasksCustomMetadata persistentTasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = state.metadata().section(PersistentTasksMetadataSection.TYPE); for (String index : concreteFollowerIndices) { IndexMetadata indexMetadata = state.metadata().index(index); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java index 6d84682815769..52fa48535f89e 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.license.LicenseUtils; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -124,7 +124,7 @@ protected void taskOperation( } static Set findFollowerIndicesFromShardFollowTasks(ClusterState state, String[] indices) { - final PersistentTasksCustomMetadata persistentTasksMetadata = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); + final PersistentTasksMetadataSection persistentTasksMetadata = state.metadata().section(PersistentTasksMetadataSection.TYPE); if (persistentTasksMetadata == null) { return Collections.emptySet(); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java 
b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java index 532773f857d97..0b28f7f502bf4 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternAction.java @@ -71,7 +71,7 @@ protected ClusterBlockException checkBlock(GetAutoFollowPatternAction.Request re } static Map getAutoFollowPattern(Metadata metadata, String name) { - AutoFollowMetadata autoFollowMetadata = metadata.custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadata = metadata.section(AutoFollowMetadata.TYPE); if (autoFollowMetadata == null) { if (name == null) { return Collections.emptyMap(); diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java index 17cd10c6c3e46..64c16882c0254 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPauseFollowAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -73,7 +73,7 @@ protected void masterOperation( listener.onFailure(new IllegalArgumentException("index [" + request.getFollowIndex() + "] is not a follower index")); return; } - PersistentTasksCustomMetadata persistentTasksMetadata = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasksMetadata = state.metadata().section(PersistentTasksMetadataSection.TYPE); if (persistentTasksMetadata == null) { listener.onFailure(new IllegalArgumentException("no shard follow tasks found")); return; @@ -86,7 +86,7 @@ protected void masterOperation( ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTask.getParams(); return shardFollowTask.getFollowShardId().getIndexName().equals(request.getFollowIndex()); }) - .map(PersistentTasksCustomMetadata.PersistentTask::getId) + .map(PersistentTasksMetadataSection.PersistentTask::getId) .toList(); if (shardFollowTaskIds.isEmpty()) { diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java index b990b738e4bc9..f79264eecd03f 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternAction.java @@ -154,7 +154,7 @@ static ClusterState innerPut( // auto patterns are always overwritten // only already followed index uuids are updated - AutoFollowMetadata currentAutoFollowMetadata = localState.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata currentAutoFollowMetadata = localState.metadata().section(AutoFollowMetadata.TYPE); Map> followedLeaderIndices; Map 
patterns; Map> headers; @@ -219,7 +219,7 @@ static ClusterState innerPut( patterns.put(request.getName(), autoFollowPattern); return localState.copyAndUpdateMetadata( - metadata -> metadata.putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, followedLeaderIndices, headers)) + metadata -> metadata.putSection(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, followedLeaderIndices, headers)) ); } diff --git a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java index addf47662276d..dec8cc3b142bd 100644 --- a/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java +++ b/x-pack/plugin/ccr/src/main/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowAction.java @@ -37,7 +37,7 @@ import org.elasticsearch.index.seqno.RetentionLeaseNotFoundException; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteClusterService; @@ -266,9 +266,9 @@ static ClusterState unfollow(String followerIndex, ClusterState current) { ); } - PersistentTasksCustomMetadata persistentTasks = current.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = current.metadata().section(PersistentTasksMetadataSection.TYPE); if (persistentTasks != null) { - for (PersistentTasksCustomMetadata.PersistentTask persistentTask : persistentTasks.tasks()) { + for (PersistentTasksMetadataSection.PersistentTask persistentTask : persistentTasks.tasks()) { if (persistentTask.getTaskName().equals(ShardFollowTask.NAME)) { ShardFollowTask shardFollowTask = (ShardFollowTask) persistentTask.getParams(); if (shardFollowTask.getFollowShardId().getIndexName().equals(followerIndex)) { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java index 677a82ddafa34..a7dd40b546c20 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrIntegTestCase.java @@ -61,7 +61,7 @@ import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.license.LicensesMetadata; import org.elasticsearch.monitor.jvm.HotThreads; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.search.SearchResponseUtils; import org.elasticsearch.search.builder.SearchSourceBuilder; @@ -188,7 +188,7 @@ public final void startClusters() throws Exception { leaderCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster()); assertBusy(() -> { ClusterService clusterService = leaderCluster.getInstance(ClusterService.class); - assertNotNull(clusterService.state().metadata().custom(LicensesMetadata.TYPE)); + assertNotNull(clusterService.state().metadata().section(LicensesMetadata.TYPE)); }, 60, TimeUnit.SECONDS); String address = leaderCluster.getDataNodeInstance(TransportService.class).boundAddress().publishAddress().toString(); @@ -212,7 +212,7 @@ public final void 
startClusters() throws Exception { followerCluster.ensureAtLeastNumDataNodes(numberOfNodesPerCluster()); assertBusy(() -> { ClusterService clusterService = followerCluster.getInstance(ClusterService.class); - assertNotNull(clusterService.state().metadata().custom(LicensesMetadata.TYPE)); + assertNotNull(clusterService.state().metadata().section(LicensesMetadata.TYPE)); }, 60, TimeUnit.SECONDS); setupMasterNodeRequestsValidatorOnFollowerCluster(); } @@ -503,8 +503,8 @@ protected void ensureNoCcrTasks() throws Exception { ); final ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState(); - PersistentTasksCustomMetadata tasks = clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE); - Collection> ccrTasks = tasks.tasks() + PersistentTasksMetadataSection tasks = clusterState.metadata().section(PersistentTasksMetadataSection.TYPE); + Collection> ccrTasks = tasks.tasks() .stream() .filter(t -> t.getTaskName().equals(ShardFollowTask.NAME)) .toList(); @@ -863,8 +863,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { ClusterState.Builder newState = ClusterState.builder(currentState); newState.metadata( Metadata.builder(currentState.getMetadata()) - .putCustom(AutoFollowMetadata.TYPE, empty) - .removeCustom(PersistentTasksCustomMetadata.TYPE) + .putSection(AutoFollowMetadata.TYPE, empty) + .removeSection(PersistentTasksMetadataSection.TYPE) .build() ); return newState.build(); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java index 6b69c172c0df3..12da7acffe347 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/CcrSingleNodeTestCase.java @@ -73,7 +73,7 @@ public void setupLocalRemote() throws Exception { @Before public void waitForTrialLicenseToBeGenerated() throws Exception { - assertBusy(() -> assertNotNull(getInstanceFromNode(ClusterService.class).state().metadata().custom(LicensesMetadata.TYPE))); + assertBusy(() -> assertNotNull(getInstanceFromNode(ClusterService.class).state().metadata().section(LicensesMetadata.TYPE))); } @After diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CCRInfoTransportActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CCRInfoTransportActionTests.java index 1d81549f7adec..37c4b501126ba 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CCRInfoTransportActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/CCRInfoTransportActionTests.java @@ -136,7 +136,7 @@ public void testUsageStats() throws Exception { ); patterns.put("pattern" + i, pattern); } - metadata.putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())); + metadata.putSection(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); Mockito.when(clusterService.state()).thenReturn(clusterState); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java index 467ef3c68f648..a417cfe66264e 100644 --- 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/AutoFollowCoordinatorTests.java @@ -111,7 +111,7 @@ public void testAutoFollower() { AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS, autoFollowHeaders); ClusterState currentState = ClusterState.builder(new ClusterName("name")) - .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .metadata(Metadata.builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata)) .build(); boolean[] invoked = new boolean[] { false }; @@ -150,7 +150,7 @@ void createAndFollow( @Override void updateAutoFollowMetadata(Function updateFunction, Consumer handler) { ClusterState resultCs = updateFunction.apply(currentState); - AutoFollowMetadata result = resultCs.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata result = resultCs.metadata().section(AutoFollowMetadata.TYPE); assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); assertThat(result.getFollowedLeaderIndexUUIDs().get("remote").size(), equalTo(1)); handler.accept(null); @@ -181,7 +181,7 @@ public void testAutoFollower_dataStream() { AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS, autoFollowHeaders); ClusterState currentState = ClusterState.builder(new ClusterName("name")) - .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .metadata(Metadata.builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata)) .build(); boolean[] invoked = new boolean[] { false }; @@ -220,7 +220,7 @@ void createAndFollow( @Override void updateAutoFollowMetadata(Function updateFunction, Consumer handler) { ClusterState resultCs = updateFunction.apply(currentState); - AutoFollowMetadata result = resultCs.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata result = resultCs.metadata().section(AutoFollowMetadata.TYPE); assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); assertThat(result.getFollowedLeaderIndexUUIDs().get("remote").size(), equalTo(1)); handler.accept(null); @@ -247,7 +247,7 @@ public void testAutoFollowerClusterStateApiFailure() { Map> headers = new HashMap<>(); AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS, headers); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")) - .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .metadata(Metadata.builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata)) .build(); Exception failure = new RuntimeException("failure"); @@ -297,7 +297,7 @@ public void testAutoFollowerUpdateClusterStateFailure() { Map> headers = new HashMap<>(); AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS, headers); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")) - .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .metadata(Metadata.builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata)) .build(); Exception failure = new RuntimeException("failure"); @@ -400,7 +400,7 @@ public void testAutoFollowerWithNoActivePatternsDoesNotStart() { ClusterState.builder(new ClusterName("test")) .metadata( Metadata.builder() - .putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(autoFollowPatterns, 
followedLeaderIndexUUIDs, headers)) + .putSection(AutoFollowMetadata.TYPE, new AutoFollowMetadata(autoFollowPatterns, followedLeaderIndexUUIDs, headers)) .build() ) .build() @@ -449,7 +449,9 @@ public void testAutoFollowerWithPausedActivePatterns() { final AtomicReference localClusterState = new AtomicReference<>( ClusterState.builder(new ClusterName("local")) - .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(emptyMap(), emptyMap(), emptyMap()))) + .metadata( + Metadata.builder().putSection(AutoFollowMetadata.TYPE, new AutoFollowMetadata(emptyMap(), emptyMap(), emptyMap())) + ) .build() ); @@ -634,7 +636,7 @@ void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List pa final ClusterState finalRemoteClusterState = remoteClusterState.get(); final ClusterState finalLocalClusterState = localClusterState.get(); - AutoFollowMetadata autoFollowMetadata = finalLocalClusterState.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadata = finalLocalClusterState.metadata().section(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata.getPatterns().size(), equalTo(2)); assertThat(autoFollowMetadata.getPatterns().values().stream().noneMatch(AutoFollowPattern::isActive), is(true)); @@ -676,7 +678,7 @@ public void testAutoFollowerCreateAndFollowApiCallFailure() { Map> headers = new HashMap<>(); AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS, headers); ClusterState clusterState = ClusterState.builder(new ClusterName("remote")) - .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .metadata(Metadata.builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata)) .build(); Exception failure = new RuntimeException("failure"); @@ -867,12 +869,12 @@ public void testRecordLeaderIndexAsFollowFunction() { Collections.emptyMap() ); ClusterState clusterState = new ClusterState.Builder(new ClusterName("name")).metadata( - new Metadata.Builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata) + new Metadata.Builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata) ).build(); Function function = recordLeaderIndexAsFollowFunction("pattern1", new Index("index1", "index1")); ClusterState result = function.apply(clusterState); - AutoFollowMetadata autoFollowMetadataResult = result.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadataResult = result.metadata().section(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadataResult.getFollowedLeaderIndexUUIDs().get("pattern1"), notNullValue()); assertThat(autoFollowMetadataResult.getFollowedLeaderIndexUUIDs().get("pattern1").size(), equalTo(1)); assertThat(autoFollowMetadataResult.getFollowedLeaderIndexUUIDs().get("pattern1").get(0), equalTo("index1")); @@ -885,7 +887,7 @@ public void testRecordLeaderIndexAsFollowFunctionNoEntry() { Collections.emptyMap() ); ClusterState clusterState = new ClusterState.Builder(new ClusterName("name")).metadata( - new Metadata.Builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata) + new Metadata.Builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata) ).build(); Function function = recordLeaderIndexAsFollowFunction("pattern1", new Index("index1", "index1")); @@ -900,7 +902,7 @@ public void testCleanFollowedLeaderIndices() { Collections.emptyMap() ); ClusterState clusterState = new ClusterState.Builder(new ClusterName("name")).metadata( - new Metadata.Builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata) + 
new Metadata.Builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata) ).build(); Metadata remoteMetadata = new Metadata.Builder().put( @@ -918,7 +920,7 @@ public void testCleanFollowedLeaderIndices() { .build(); Function function = cleanFollowedRemoteIndices(remoteMetadata, Collections.singletonList("pattern1")); - AutoFollowMetadata result = function.apply(clusterState).metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata result = function.apply(clusterState).metadata().section(AutoFollowMetadata.TYPE); assertThat(result.getFollowedLeaderIndexUUIDs().get("pattern1").size(), equalTo(2)); assertThat(result.getFollowedLeaderIndexUUIDs().get("pattern1").get(0), equalTo("index1")); assertThat(result.getFollowedLeaderIndexUUIDs().get("pattern1").get(1), equalTo("index3")); @@ -931,7 +933,7 @@ public void testCleanFollowedLeaderIndicesNoChanges() { Collections.emptyMap() ); ClusterState clusterState = new ClusterState.Builder(new ClusterName("name")).metadata( - new Metadata.Builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata) + new Metadata.Builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata) ).build(); Metadata remoteMetadata = new Metadata.Builder().put( @@ -966,7 +968,7 @@ public void testCleanFollowedLeaderIndicesNoEntry() { Collections.emptyMap() ); ClusterState clusterState = new ClusterState.Builder(new ClusterName("name")).metadata( - new Metadata.Builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata) + new Metadata.Builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata) ).build(); Metadata remoteMetadata = new Metadata.Builder().put( @@ -1414,7 +1416,7 @@ public void testUpdateAutoFollowers() { ClusterState followerState = ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()) ) @@ -1438,7 +1440,7 @@ public void testUpdateAutoFollowers() { ClusterState clusterState = ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() - .putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())) + .putSection(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())) ) .build(); autoFollowCoordinator.updateAutoFollowers(clusterState); @@ -1455,7 +1457,7 @@ public void testUpdateAutoFollowers() { clusterState = ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() - .putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())) + .putSection(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())) ) .build(); autoFollowCoordinator.updateAutoFollowers(clusterState); @@ -1467,7 +1469,7 @@ public void testUpdateAutoFollowers() { clusterState = ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() - .putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())) + .putSection(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())) ) .build(); autoFollowCoordinator.updateAutoFollowers(clusterState); @@ -1486,7 +1488,7 @@ public void testUpdateAutoFollowers() { clusterState = ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() - .putCustom(AutoFollowMetadata.TYPE, new 
AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())) + .putSection(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())) ) .build(); autoFollowCoordinator.updateAutoFollowers(clusterState); @@ -1560,7 +1562,7 @@ public void testUpdateAutoFollowersNoActivePatterns() { ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap()) ) @@ -1624,7 +1626,7 @@ public void testUpdateAutoFollowersNoActivePatterns() { ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap()) ) @@ -1664,7 +1666,7 @@ public void testUpdateAutoFollowersNoActivePatterns() { ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap()) ) @@ -1683,7 +1685,7 @@ public void testUpdateAutoFollowersNoActivePatterns() { ClusterState.builder(new ClusterName("remote")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyMap()) ) @@ -1713,7 +1715,7 @@ public void testWaitForMetadataVersion() { ClusterState[] states = new ClusterState[16]; for (int i = 0; i < states.length; i++) { states[i] = ClusterState.builder(new ClusterName("name")) - .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .metadata(Metadata.builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata)) .build(); String indexName = "logs-" + i; leaderStates.add( @@ -1776,7 +1778,7 @@ public void testWaitForTimeOut() { ClusterState[] states = new ClusterState[16]; for (int i = 0; i < states.length; i++) { states[i] = ClusterState.builder(new ClusterName("name")) - .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .metadata(Metadata.builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata)) .build(); } Consumer> handler = results -> { fail("should not be invoked"); }; @@ -1828,7 +1830,7 @@ public void testAutoFollowerSoftDeletesDisabled() { AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(patterns, followedLeaderIndexUUIDS, autoFollowHeaders); ClusterState currentState = ClusterState.builder(new ClusterName("name")) - .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata)) + .metadata(Metadata.builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata)) .build(); List results = new ArrayList<>(); @@ -1853,7 +1855,7 @@ void createAndFollow( @Override void updateAutoFollowMetadata(Function updateFunction, Consumer handler) { ClusterState resultCs = updateFunction.apply(currentState); - AutoFollowMetadata result = resultCs.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata result = resultCs.metadata().section(AutoFollowMetadata.TYPE); assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); assertThat(result.getFollowedLeaderIndexUUIDs().get("remote").size(), equalTo(1)); handler.accept(null); @@ -1909,7 +1911,7 @@ public void testAutoFollowerFollowerIndexAlreadyExists() { .numberOfShards(1) .numberOfReplicas(0) ) - .putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata) 
+ .putSection(AutoFollowMetadata.TYPE, autoFollowMetadata) ) .build(); @@ -1935,7 +1937,7 @@ void createAndFollow( @Override void updateAutoFollowMetadata(Function updateFunction, Consumer handler) { ClusterState resultCs = updateFunction.apply(currentState); - AutoFollowMetadata result = resultCs.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata result = resultCs.metadata().section(AutoFollowMetadata.TYPE); assertThat(result.getFollowedLeaderIndexUUIDs().size(), equalTo(1)); assertThat(result.getFollowedLeaderIndexUUIDs().get("remote").size(), equalTo(1)); handler.accept(null); @@ -1990,7 +1992,7 @@ public void testRepeatedFailures() throws InterruptedException { TimeValue.ZERO ); final AutoFollowMetadata autoFollowMetadata = new AutoFollowMetadata(Map.of("remote", pattern), Map.of(), Map.of()); - when(metadata.custom(AutoFollowMetadata.TYPE)).thenReturn(autoFollowMetadata); + when(metadata.section(AutoFollowMetadata.TYPE)).thenReturn(autoFollowMetadata); final int iterations = randomIntBetween(16384, 32768); // sufficiently large to exercise that we do not stack overflow final AtomicInteger counter = new AtomicInteger(); @@ -2053,7 +2055,7 @@ public void testClosedIndicesAreNotAutoFollowed() { final ClusterState localState = ClusterState.builder(new ClusterName("local")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata( Map.of(pattern, createAutoFollowPattern("remote", "docs-*")), @@ -2148,7 +2150,7 @@ public void testExcludedPatternIndicesAreNotAutoFollowed() { final ClusterState localState = ClusterState.builder(new ClusterName("local")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata( Map.of( @@ -2236,7 +2238,7 @@ void cleanFollowedRemoteIndices(ClusterState remoteClusterState, List pa assertThat(results, notNullValue()); assertThat(results.size(), equalTo(1)); - AutoFollowMetadata autoFollowMetadata = lastModifiedClusterState.get().metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadata = lastModifiedClusterState.get().metadata().section(AutoFollowMetadata.TYPE); final List autoFollowedIndices = autoFollowMetadata.getFollowedLeaderIndexUUIDs().get(pattern); assertThat(autoFollowedIndices.size(), equalTo(nbLeaderIndices)); @@ -2412,7 +2414,7 @@ private AutoFollowCoordinator createAutoFollowCoordinator() { private ClusterState createClusterStateWith(Map patterns) { var builder = ClusterState.builder(new ClusterName("remote")); if (patterns != null) { - builder.metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Map.of(), Map.of()))); + builder.metadata(Metadata.builder().putSection(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Map.of(), Map.of()))); } return builder.build(); } @@ -2449,7 +2451,7 @@ private Tuple, Set> execute final ClusterState localState = ClusterState.builder(new ClusterName("local")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata( Map.of(pattern, createAutoFollowPattern("remote", indexPattern)), @@ -2592,7 +2594,7 @@ private static Supplier localClusterStateSupplier(ClusterState... 
Collections.emptyMap() ); final ClusterState lastState = ClusterState.builder(new ClusterName("remote")) - .metadata(Metadata.builder().putCustom(AutoFollowMetadata.TYPE, emptyAutoFollowMetadata)) + .metadata(Metadata.builder().putSection(AutoFollowMetadata.TYPE, emptyAutoFollowMetadata)) .build(); final LinkedList queue = new LinkedList<>(Arrays.asList(states)); return () -> { diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutorAssignmentTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutorAssignmentTests.java index 630aab4c78f43..9c7c119ac0241 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutorAssignmentTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/ShardFollowTasksExecutorAssignmentTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.ccr.CcrSettings; diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternActionTests.java index 3a6a0d90f60ba..c78bcf5bb4497 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportActivateAutoFollowPatternActionTests.java @@ -45,7 +45,7 @@ public void testInnerActivateDoesNotExist() { ClusterState clusterState = ClusterState.builder(new ClusterName("cluster")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata( singletonMap("remote_cluster", randomAutoFollowPattern()), @@ -70,7 +70,7 @@ public void testInnerActivateToggle() { final ClusterState clusterState = ClusterState.builder(new ClusterName("cluster")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata( singletonMap("remote_cluster", autoFollowPattern), @@ -95,10 +95,10 @@ public void testInnerActivateToggle() { ClusterState updatedState = TransportActivateAutoFollowPatternAction.innerActivate(pauseRequest, clusterState); assertThat(updatedState, not(sameInstance(clusterState))); - AutoFollowMetadata updatedAutoFollowMetadata = updatedState.getMetadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata updatedAutoFollowMetadata = updatedState.getMetadata().section(AutoFollowMetadata.TYPE); assertNotEquals(updatedAutoFollowMetadata, notNullValue()); - AutoFollowMetadata autoFollowMetadata = clusterState.getMetadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadata = clusterState.getMetadata().section(AutoFollowMetadata.TYPE); assertNotEquals(updatedAutoFollowMetadata, autoFollowMetadata); assertThat(updatedAutoFollowMetadata.getPatterns().size(), equalTo(autoFollowMetadata.getPatterns().size())); assertThat(updatedAutoFollowMetadata.getPatterns().get("remote_cluster").isActive(), not(autoFollowPattern.isActive())); diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java index f94f23c3695bf..1e294e627a09b 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportDeleteAutoFollowPatternActionTests.java @@ -94,7 +94,7 @@ public void testInnerDelete() { ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS, existingHeaders) ) @@ -104,7 +104,7 @@ public void testInnerDelete() { Request request = new Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "name1"); AutoFollowMetadata result = TransportDeleteAutoFollowPatternAction.innerDelete(request, clusterState) .getMetadata() - .custom(AutoFollowMetadata.TYPE); + .section(AutoFollowMetadata.TYPE); assertThat(result.getPatterns().size(), equalTo(1)); assertThat(result.getPatterns().get("name2"), notNullValue()); assertThat(result.getPatterns().get("name2").getRemoteCluster(), equalTo("asia_cluster")); @@ -147,7 +147,7 @@ public void testInnerDeleteDoesNotExist() { ClusterState clusterState = ClusterState.builder(new ClusterName("us_cluster")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS, existingHeaders) ) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoActionTests.java index 56a3d940aa404..0aa1b5e0b60fe 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowInfoActionTests.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.core.ccr.action.FollowInfoAction.Response; @@ -46,7 +46,7 @@ public void testGetFollowInfos() { } private static ClusterState createCS(String[] indices, boolean[] followerIndices, boolean[] statuses) { - PersistentTasksCustomMetadata.Builder persistentTasks = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder persistentTasks = PersistentTasksMetadataSection.builder(); Metadata.Builder mdBuilder = Metadata.builder(); for (int i = 0; i < indices.length; i++) { String index = indices[i]; @@ -72,7 +72,7 @@ private static ClusterState createCS(String[] indices, boolean[] followerIndices mdBuilder.put(imdBuilder); } - mdBuilder.putCustom(PersistentTasksCustomMetadata.TYPE, persistentTasks.build()); + mdBuilder.putSection(PersistentTasksMetadataSection.TYPE, persistentTasks.build()); return ClusterState.builder(new ClusterName("_cluster")).metadata(mdBuilder.build()).build(); } diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java index 9a5afd65952a5..adca4cdaf23a8 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportFollowStatsActionTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.index.Index; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ccr.action.ShardFollowTask; @@ -40,7 +40,7 @@ public void testFindFollowerIndicesFromShardFollowTasks() { IndexMetadata index2 = IndexMetadata.builder("index2").settings(indexSettings).build(); IndexMetadata index3 = IndexMetadata.builder("index3").settings(indexSettings).build(); - PersistentTasksCustomMetadata.Builder persistentTasks = PersistentTasksCustomMetadata.builder() + PersistentTasksMetadataSection.Builder persistentTasks = PersistentTasksMetadataSection.builder() .addTask("1", ShardFollowTask.NAME, createShardFollowTask(index1.getIndex()), null) .addTask("2", ShardFollowTask.NAME, createShardFollowTask(index2.getIndex()), null) .addTask("3", ShardFollowTask.NAME, createShardFollowTask(index3.getIndex()), null); @@ -48,7 +48,7 @@ public void testFindFollowerIndicesFromShardFollowTasks() { ClusterState clusterState = ClusterState.builder(new ClusterName("_cluster")) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, persistentTasks.build()) + .putSection(PersistentTasksMetadataSection.TYPE, persistentTasks.build()) // only add index1 and index2 .put(index1, false) .put(index2, false) diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternActionTests.java index deab8dce2c821..30f7a57f023b2 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportGetAutoFollowPatternActionTests.java @@ -68,7 +68,7 @@ public void testGetAutoFollowPattern() { ) ); Metadata metadata = Metadata.builder() - .putCustom(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())) + .putSection(AutoFollowMetadata.TYPE, new AutoFollowMetadata(patterns, Collections.emptyMap(), Collections.emptyMap())) .build(); Map result = TransportGetAutoFollowPatternAction.getAutoFollowPattern(metadata, "name1"); @@ -92,7 +92,7 @@ public void testGetAutoFollowPatternNoAutoFollowPatterns() { Collections.emptyMap(), Collections.emptyMap() ); - Metadata metadata = Metadata.builder().putCustom(AutoFollowMetadata.TYPE, autoFollowMetadata).build(); + Metadata metadata = Metadata.builder().putSection(AutoFollowMetadata.TYPE, autoFollowMetadata).build(); expectThrows(ResourceNotFoundException.class, () -> TransportGetAutoFollowPatternAction.getAutoFollowPattern(metadata, "name1")); Map result = TransportGetAutoFollowPatternAction.getAutoFollowPattern(metadata, null); diff --git 
a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java index ea37cdf1aae0c..688a15d924bac 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportPutAutoFollowPatternActionTests.java @@ -47,7 +47,7 @@ public void testInnerPut() { ClusterState remoteState = ClusterState.builder(new ClusterName("eu_cluster")).metadata(Metadata.builder()).build(); ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, null, localState, remoteState); - AutoFollowMetadata autoFollowMetadata = result.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadata = result.metadata().section(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata, notNullValue()); assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); assertThat(autoFollowMetadata.getPatterns().get("name1").getRemoteCluster(), equalTo("eu_cluster")); @@ -107,7 +107,7 @@ public void testInnerPut_existingLeaderIndices() { ClusterState remoteState = ClusterState.builder(new ClusterName("eu_cluster")).metadata(mdBuilder).build(); ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, null, localState, remoteState); - AutoFollowMetadata autoFollowMetadata = result.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadata = result.metadata().section(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata, notNullValue()); assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); assertThat(autoFollowMetadata.getPatterns().get("name1").getRemoteCluster(), equalTo("eu_cluster")); @@ -172,7 +172,7 @@ public void testInnerPut_existingLeaderIndicesAndAutoFollowMetadata() { ClusterState localState = ClusterState.builder(new ClusterName("us_cluster")) .metadata( Metadata.builder() - .putCustom( + .putSection( AutoFollowMetadata.TYPE, new AutoFollowMetadata(existingAutoFollowPatterns, existingAlreadyFollowedIndexUUIDS, existingHeaders) ) @@ -196,7 +196,7 @@ public void testInnerPut_existingLeaderIndicesAndAutoFollowMetadata() { ClusterState remoteState = ClusterState.builder(new ClusterName("eu_cluster")).metadata(mdBuilder).build(); ClusterState result = TransportPutAutoFollowPatternAction.innerPut(request, null, localState, remoteState); - AutoFollowMetadata autoFollowMetadata = result.metadata().custom(AutoFollowMetadata.TYPE); + AutoFollowMetadata autoFollowMetadata = result.metadata().section(AutoFollowMetadata.TYPE); assertThat(autoFollowMetadata, notNullValue()); assertThat(autoFollowMetadata.getPatterns().size(), equalTo(1)); assertThat(autoFollowMetadata.getPatterns().get("name1").getRemoteCluster(), equalTo("eu_cluster")); diff --git a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java index bfaac52f92855..1fe2a2f123d0c 100644 --- a/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java +++ b/x-pack/plugin/ccr/src/test/java/org/elasticsearch/xpack/ccr/action/TransportUnfollowActionTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.index.IndexVersion; import 
org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.ccr.Ccr; import org.elasticsearch.xpack.ccr.CcrSettings; @@ -93,7 +93,7 @@ public void testUnfollowRunningShardFollowTasks() { TimeValue.timeValueMillis(10), Collections.emptyMap() ); - PersistentTasksCustomMetadata.PersistentTask task = new PersistentTasksCustomMetadata.PersistentTask<>( + PersistentTasksMetadataSection.PersistentTask task = new PersistentTasksMetadataSection.PersistentTask<>( "id", ShardFollowTask.NAME, params, @@ -105,9 +105,9 @@ public void testUnfollowRunningShardFollowTasks() { .metadata( Metadata.builder() .put(followerIndex) - .putCustom( - PersistentTasksCustomMetadata.TYPE, - new PersistentTasksCustomMetadata(0, Collections.singletonMap("id", task)) + .putSection( + PersistentTasksMetadataSection.TYPE, + new PersistentTasksMetadataSection(0, Collections.singletonMap("id", task)) ) .build() ) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java index b352a9abce886..e19257fa63193 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/ClusterStateLicenseService.java @@ -257,13 +257,13 @@ public ClusterState execute(ClusterState currentState) throws Exception { ); } Metadata currentMetadata = currentState.metadata(); - LicensesMetadata licensesMetadata = currentMetadata.custom(LicensesMetadata.TYPE); + LicensesMetadata licensesMetadata = currentMetadata.section(LicensesMetadata.TYPE); TrialLicenseVersion trialVersion = null; if (licensesMetadata != null) { trialVersion = licensesMetadata.getMostRecentTrialVersion(); } Metadata.Builder mdBuilder = Metadata.builder(currentMetadata); - mdBuilder.putCustom(LicensesMetadata.TYPE, new LicensesMetadata(newLicense, trialVersion)); + mdBuilder.putSection(LicensesMetadata.TYPE, new LicensesMetadata(newLicense, trialVersion)); return ClusterState.builder(currentState).metadata(mdBuilder).build(); } }); @@ -324,7 +324,7 @@ public License getLicense() { } private LicensesMetadata getLicensesMetadata() { - return this.clusterService.state().metadata().custom(LicensesMetadata.TYPE); + return this.clusterService.state().metadata().section(LicensesMetadata.TYPE); } @Override @@ -385,7 +385,7 @@ protected void doStart() throws ElasticsearchException { if (clusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) == false && clusterState.nodes().getMasterNode() != null && XPackPlugin.isReadyForXPackCustomMetadata(clusterState)) { - final LicensesMetadata currentMetadata = clusterState.metadata().custom(LicensesMetadata.TYPE); + final LicensesMetadata currentMetadata = clusterState.metadata().section(LicensesMetadata.TYPE); boolean noLicense = currentMetadata == null || currentMetadata.getLicense() == null; if (clusterState.getNodes().isLocalNodeElectedMaster() && (noLicense || LicenseUtils.licenseNeedsExtended(currentMetadata.getLicense()))) { @@ -416,8 +416,8 @@ public void clusterChanged(ClusterChangedEvent event) { final ClusterState previousClusterState = event.previousState(); final ClusterState currentClusterState = event.state(); if 
(currentClusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) == false) { - final LicensesMetadata prevLicensesMetadata = previousClusterState.getMetadata().custom(LicensesMetadata.TYPE); - final LicensesMetadata currentLicensesMetadata = currentClusterState.getMetadata().custom(LicensesMetadata.TYPE); + final LicensesMetadata prevLicensesMetadata = previousClusterState.getMetadata().section(LicensesMetadata.TYPE); + final LicensesMetadata currentLicensesMetadata = currentClusterState.getMetadata().section(LicensesMetadata.TYPE); // notify all interested plugins if (previousClusterState.blocks().hasGlobalBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK) || prevLicensesMetadata == null) { if (currentLicensesMetadata != null) { @@ -456,8 +456,8 @@ private void updateXPackLicenseState(License license) { } private void maybeRegisterOrUpdateLicense(ClusterState previousClusterState, ClusterState currentClusterState) { - final LicensesMetadata prevLicensesMetadata = previousClusterState.getMetadata().custom(LicensesMetadata.TYPE); - final LicensesMetadata currentLicensesMetadata = currentClusterState.getMetadata().custom(LicensesMetadata.TYPE); + final LicensesMetadata prevLicensesMetadata = previousClusterState.getMetadata().section(LicensesMetadata.TYPE); + final LicensesMetadata currentLicensesMetadata = currentClusterState.getMetadata().section(LicensesMetadata.TYPE); License currentLicense = null; boolean noLicenseInPrevMetadata = prevLicensesMetadata == null || prevLicensesMetadata.getLicense() == null; if (noLicenseInPrevMetadata == false) { @@ -536,7 +536,7 @@ SchedulerEngine.Schedule nextLicenseCheck(License license) { } public License getLicense(final Metadata metadata) { - final LicensesMetadata licensesMetadata = metadata.custom(LicensesMetadata.TYPE); + final LicensesMetadata licensesMetadata = metadata.section(LicensesMetadata.TYPE); return getLicenseFromLicensesMetadata(licensesMetadata); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java index e06d14341b61e..407df3cb944f1 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/LicensesMetadata.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,7 +28,7 @@ /** * Contains metadata about registered licenses */ -public class LicensesMetadata extends AbstractNamedDiffable implements Metadata.Custom { +public class LicensesMetadata extends AbstractNamedDiffable implements MetadataSection { public static final String TYPE = "licenses"; @@ -186,8 +187,8 @@ public LicensesMetadata(StreamInput streamInput) throws IOException { } } - public static NamedDiff readDiffFrom(StreamInput streamInput) throws IOException { - return readDiffFrom(Metadata.Custom.class, TYPE, streamInput); + public static NamedDiff readDiffFrom(StreamInput streamInput) throws IOException { + return readDiffFrom(MetadataSection.class, TYPE, streamInput); } public static License extractLicense(LicensesMetadata licensesMetadata) { diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java index 95b24d586161a..0a71cfbcd8231 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartBasicClusterTask.java @@ -123,7 +123,7 @@ static class Executor implements ClusterStateTaskExecutor public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { final var initialState = batchExecutionContext.initialState(); XPackPlugin.checkReadyForXPackCustomMetadata(initialState); - final LicensesMetadata originalLicensesMetadata = initialState.metadata().custom(LicensesMetadata.TYPE); + final LicensesMetadata originalLicensesMetadata = initialState.metadata().section(LicensesMetadata.TYPE); var currentLicensesMetadata = originalLicensesMetadata; for (final var taskContext : batchExecutionContext.taskContexts()) { try (var ignored = taskContext.captureResponseHeaders()) { @@ -134,7 +134,7 @@ public ClusterState execute(BatchExecutionContext batchEx return initialState; } else { return ClusterState.builder(initialState) - .metadata(Metadata.builder(initialState.metadata()).putCustom(LicensesMetadata.TYPE, currentLicensesMetadata)) + .metadata(Metadata.builder(initialState.metadata()).putSection(LicensesMetadata.TYPE, currentLicensesMetadata)) .build(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java index 22f4de105cb2d..735e229f383e2 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartTrialClusterTask.java @@ -117,7 +117,7 @@ static class Executor implements ClusterStateTaskExecutor public ClusterState execute(BatchExecutionContext batchExecutionContext) throws Exception { final var initialState = batchExecutionContext.initialState(); XPackPlugin.checkReadyForXPackCustomMetadata(initialState); - final LicensesMetadata originalLicensesMetadata = initialState.metadata().custom(LicensesMetadata.TYPE); + final LicensesMetadata originalLicensesMetadata = initialState.metadata().section(LicensesMetadata.TYPE); var currentLicensesMetadata = originalLicensesMetadata; for (final var taskContext : batchExecutionContext.taskContexts()) { try (var ignored = taskContext.captureResponseHeaders()) { @@ -128,7 +128,7 @@ public ClusterState execute(BatchExecutionContext batchEx return initialState; } else { return ClusterState.builder(initialState) - .metadata(Metadata.builder(initialState.metadata()).putCustom(LicensesMetadata.TYPE, currentLicensesMetadata)) + .metadata(Metadata.builder(initialState.metadata()).putSection(LicensesMetadata.TYPE, currentLicensesMetadata)) .build(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java index accae7fee596d..d2dc22a848f69 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/StartupSelfGeneratedLicenseTask.java @@ -43,7 +43,7 @@ public StartupSelfGeneratedLicenseTask(Settings settings, Clock clock, ClusterSe @Override public void 
clusterStateProcessed(ClusterState oldState, ClusterState newState) { - LicensesMetadata licensesMetadata = newState.metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licensesMetadata = newState.metadata().section(LicensesMetadata.TYPE); if (logger.isDebugEnabled()) { logger.debug("registered self generated license: {}", licensesMetadata); } @@ -53,7 +53,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) public ClusterState execute(ClusterState currentState) throws Exception { XPackPlugin.checkReadyForXPackCustomMetadata(currentState); final Metadata metadata = currentState.metadata(); - final LicensesMetadata currentLicensesMetadata = metadata.custom(LicensesMetadata.TYPE); + final LicensesMetadata currentLicensesMetadata = metadata.section(LicensesMetadata.TYPE); // do not generate a license if any license is present if (currentLicensesMetadata == null) { License.LicenseType type = SelfGeneratedLicense.validateSelfGeneratedType( @@ -89,7 +89,7 @@ private ClusterState updateLicenseSignature(ClusterState currentState, LicensesM License selfGeneratedLicense = SelfGeneratedLicense.create(specBuilder); TrialLicenseVersion trialVersion = currentLicenseMetadata.getMostRecentTrialVersion(); LicensesMetadata newLicenseMetadata = new LicensesMetadata(selfGeneratedLicense, trialVersion); - mdBuilder.putCustom(LicensesMetadata.TYPE, newLicenseMetadata); + mdBuilder.putSection(LicensesMetadata.TYPE, newLicenseMetadata); logger.info( "Updating existing license to the new version.\n\nOld license:\n {}\n\n New license:\n{}", license, @@ -112,7 +112,7 @@ private ClusterState extendBasic(ClusterState currentState, LicensesMetadata cur License license = currentLicenseMetadata.getLicense(); Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); LicensesMetadata newLicenseMetadata = createBasicLicenseFromExistingLicense(currentLicenseMetadata); - mdBuilder.putCustom(LicensesMetadata.TYPE, newLicenseMetadata); + mdBuilder.putSection(LicensesMetadata.TYPE, newLicenseMetadata); logger.info(""" Existing basic license has an expiration. Basic licenses no longer expire.Regenerating license. 
@@ -161,7 +161,7 @@ private ClusterState updateWithLicense(ClusterState currentState, License.Licens } else { licensesMetadata = new LicensesMetadata(selfGeneratedLicense, null); } - mdBuilder.putCustom(LicensesMetadata.TYPE, licensesMetadata); + mdBuilder.putSection(LicensesMetadata.TYPE, licensesMetadata); return ClusterState.builder(currentState).metadata(mdBuilder).build(); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetBasicStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetBasicStatusAction.java index ea8e8c84940b4..17bb67a7ba37c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetBasicStatusAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetBasicStatusAction.java @@ -50,7 +50,7 @@ protected void masterOperation( ClusterState state, ActionListener listener ) throws Exception { - LicensesMetadata licensesMetadata = state.metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licensesMetadata = state.metadata().section(LicensesMetadata.TYPE); if (licensesMetadata == null) { listener.onResponse(new GetBasicStatusResponse(true)); } else { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetTrialStatusAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetTrialStatusAction.java index 78b00286f0d72..12cb72d661d48 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetTrialStatusAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/license/TransportGetTrialStatusAction.java @@ -55,7 +55,7 @@ protected void masterOperation( ActionListener listener ) throws Exception { if (licenseService instanceof ClusterStateLicenseService) { - LicensesMetadata licensesMetadata = state.metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licensesMetadata = state.metadata().section(LicensesMetadata.TYPE); listener.onResponse(new GetTrialStatusResponse(licensesMetadata == null || licensesMetadata.isEligibleForTrial())); } else { listener.onResponse(new GetTrialStatusResponse(false)); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a2c3e40c76ae4..2ce230c7e43a9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -8,7 +8,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; -import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.Setting; import org.elasticsearch.index.query.QueryBuilder; @@ -161,7 +161,7 @@ public List getNamedWriteables() { ConfigurableClusterPrivileges.WriteProfileDataPrivileges::createFrom ), // security : role-mappings - new NamedWriteableRegistry.Entry(Metadata.Custom.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::new), + new NamedWriteableRegistry.Entry(MetadataSection.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, RoleMappingMetadata.TYPE, RoleMappingMetadata::readDiffFrom), new NamedWriteableRegistry.Entry(RoleMapperExpression.class, AllExpression.NAME, AllExpression::new), 
new NamedWriteableRegistry.Entry(RoleMapperExpression.class, AnyExpression.NAME, AnyExpression::new), @@ -181,11 +181,11 @@ public List getNamedWriteables() { // sql new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SQL, SqlFeatureSetUsage::new), // watcher - new NamedWriteableRegistry.Entry(Metadata.Custom.class, WatcherMetadata.TYPE, WatcherMetadata::new), + new NamedWriteableRegistry.Entry(MetadataSection.class, WatcherMetadata.TYPE, WatcherMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, WatcherMetadata.TYPE, WatcherMetadata::readDiffFrom), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.WATCHER, WatcherFeatureSetUsage::new), // licensing - new NamedWriteableRegistry.Entry(Metadata.Custom.class, LicensesMetadata.TYPE, LicensesMetadata::new), + new NamedWriteableRegistry.Entry(MetadataSection.class, LicensesMetadata.TYPE, LicensesMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, LicensesMetadata.TYPE, LicensesMetadata::readDiffFrom), // rollup new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.ROLLUP, RollupFeatureSetUsage::new), @@ -195,30 +195,30 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(Task.Status.class, DownsampleShardStatus.NAME, DownsampleShardStatus::new), // ccr new NamedWriteableRegistry.Entry(AutoFollowMetadata.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new), - new NamedWriteableRegistry.Entry(Metadata.Custom.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new), + new NamedWriteableRegistry.Entry(MetadataSection.class, AutoFollowMetadata.TYPE, AutoFollowMetadata::new), new NamedWriteableRegistry.Entry( NamedDiff.class, AutoFollowMetadata.TYPE, - in -> AutoFollowMetadata.readDiffFrom(Metadata.Custom.class, AutoFollowMetadata.TYPE, in) + in -> AutoFollowMetadata.readDiffFrom(MetadataSection.class, AutoFollowMetadata.TYPE, in) ), // ILM new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.INDEX_LIFECYCLE, IndexLifecycleFeatureSetUsage::new), // SLM new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.SNAPSHOT_LIFECYCLE, SLMFeatureSetUsage::new), // ILM - Custom Metadata - new NamedWriteableRegistry.Entry(Metadata.Custom.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata::new), + new NamedWriteableRegistry.Entry(MetadataSection.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata::new), new NamedWriteableRegistry.Entry( NamedDiff.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.IndexLifecycleMetadataDiff::new ), - new NamedWriteableRegistry.Entry(Metadata.Custom.class, LifecycleOperationMetadata.TYPE, LifecycleOperationMetadata::new), + new NamedWriteableRegistry.Entry(MetadataSection.class, LifecycleOperationMetadata.TYPE, LifecycleOperationMetadata::new), new NamedWriteableRegistry.Entry( NamedDiff.class, LifecycleOperationMetadata.TYPE, LifecycleOperationMetadata.LifecycleOperationMetadataDiff::new ), - new NamedWriteableRegistry.Entry(Metadata.Custom.class, SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata::new), + new NamedWriteableRegistry.Entry(MetadataSection.class, SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata::new), new NamedWriteableRegistry.Entry( NamedDiff.class, SnapshotLifecycleMetadata.TYPE, @@ -240,7 +240,7 @@ public List getNamedWriteables() { new NamedWriteableRegistry.Entry(LifecycleAction.class, SearchableSnapshotAction.NAME, SearchableSnapshotAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, MigrateAction.NAME, 
MigrateAction::readFrom), // Transforms - new NamedWriteableRegistry.Entry(Metadata.Custom.class, TransformMetadata.TYPE, TransformMetadata::new), + new NamedWriteableRegistry.Entry(MetadataSection.class, TransformMetadata.TYPE, TransformMetadata::new), new NamedWriteableRegistry.Entry(NamedDiff.class, TransformMetadata.TYPE, TransformMetadata.TransformMetadataDiff::new), new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.TRANSFORM, TransformFeatureSetUsage::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TransformField.TASK_NAME, TransformTaskParams::new), @@ -313,7 +313,7 @@ public List getNamedXContent() { return Arrays.asList( // ML - Custom metadata new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField("ml"), parser -> MlMetadata.LENIENT_PARSER.parse(parser, null).build() ), @@ -352,9 +352,9 @@ public List getNamedXContent() { SnapshotUpgradeTaskState::fromXContent ), // watcher - new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(WatcherMetadata.TYPE), WatcherMetadata::fromXContent), + new NamedXContentRegistry.Entry(MetadataSection.class, new ParseField(WatcherMetadata.TYPE), WatcherMetadata::fromXContent), // licensing - new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(LicensesMetadata.TYPE), LicensesMetadata::fromXContent), + new NamedXContentRegistry.Entry(MetadataSection.class, new ParseField(LicensesMetadata.TYPE), LicensesMetadata::fromXContent), // rollup new NamedXContentRegistry.Entry(PersistentTaskParams.class, new ParseField(RollupField.TASK_NAME), RollupJob::fromXContent), new NamedXContentRegistry.Entry(Task.Status.class, new ParseField(RollupJobStatus.NAME), RollupJobStatus::fromXContent), @@ -372,7 +372,7 @@ public List getNamedXContent() { TransformState::fromXContent ), new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(TransformMetadata.TYPE), parser -> TransformMetadata.LENIENT_PARSER.parse(parser, null).build() ), @@ -383,7 +383,7 @@ public List getNamedXContent() { SecurityMigrationTaskParams::fromXContent ), new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(RoleMappingMetadata.TYPE), RoleMappingMetadata::fromXContent ) diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java index f79a3fbf124b1..b798e8835729b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackPlugin.java @@ -295,12 +295,12 @@ public static List nodesNotReadyForXPackCustomMetadata(ClusterSta private static boolean alreadyContainsXPackCustomMetadata(ClusterState clusterState) { final Metadata metadata = clusterState.metadata(); - return metadata.custom(LicensesMetadata.TYPE) != null - || metadata.custom(MlMetadata.TYPE) != null - || metadata.custom(WatcherMetadata.TYPE) != null + return metadata.section(LicensesMetadata.TYPE) != null + || metadata.section(MlMetadata.TYPE) != null + || metadata.section(WatcherMetadata.TYPE) != null || RoleMappingMetadata.getFromClusterState(clusterState).isEmpty() == false || clusterState.custom(TokenMetadata.TYPE) != null - || metadata.custom(TransformMetadata.TYPE) != null; + || metadata.section(TransformMetadata.TYPE) != null; } @Override diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java index f5620b557bf26..ee31a94216e3f 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ccr/AutoFollowMetadata.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.metadata.DataStream; import org.elasticsearch.cluster.metadata.IndexAbstraction; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -42,7 +43,7 @@ /** * Custom metadata that contains auto follow patterns and what leader indices an auto follow pattern has already followed. */ -public class AutoFollowMetadata extends AbstractNamedDiffable implements Metadata.Custom { +public class AutoFollowMetadata extends AbstractNamedDiffable implements MetadataSection { public static final String TYPE = "ccr_auto_follow"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichMetadata.java index 0cd5d617752f4..1cc2a13e1b03d 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/enrich/EnrichMetadata.java @@ -11,6 +11,7 @@ import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; @@ -30,7 +31,7 @@ /** * Encapsulates enrich policies as custom metadata inside cluster state. 
*/ -public final class EnrichMetadata extends AbstractNamedDiffable implements Metadata.Custom { +public final class EnrichMetadata extends AbstractNamedDiffable implements MetadataSection { public static final String TYPE = "enrich"; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java index d4f2ecb36e95d..b56d0371b1448 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleMetadata.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.Metadata.Custom; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -34,7 +34,7 @@ import java.util.function.Function; import java.util.stream.Collectors; -public class IndexLifecycleMetadata implements Metadata.Custom { +public class IndexLifecycleMetadata implements MetadataSection { public static final String TYPE = "index_lifecycle"; public static final ParseField OPERATION_MODE_FIELD = new ParseField("operation_mode"); public static final ParseField POLICIES_FIELD = new ParseField("policies"); @@ -100,7 +100,7 @@ public Map getPolicies() { } @Override - public Diff diff(Custom previousState) { + public Diff diff(MetadataSection previousState) { return new IndexLifecycleMetadataDiff((IndexLifecycleMetadata) previousState, this); } @@ -149,7 +149,7 @@ public String toString() { return Strings.toString(this, false, true); } - public static class IndexLifecycleMetadataDiff implements NamedDiff { + public static class IndexLifecycleMetadataDiff implements NamedDiff { final Diff> policies; final OperationMode operationMode; @@ -170,7 +170,7 @@ public IndexLifecycleMetadataDiff(StreamInput in) throws IOException { } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { TreeMap newPolicies = new TreeMap<>( policies.apply(((IndexLifecycleMetadata) part).policyMetadatas) ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java index 529eb16b668c3..2282294eba772 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadata.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -32,7 +33,7 @@ * Class that encapsulates the running operation mode of Index Lifecycle * Management and Snapshot Lifecycle Management */ -public class LifecycleOperationMetadata implements Metadata.Custom { +public class LifecycleOperationMetadata implements MetadataSection { public static final String TYPE = 
"lifecycle_operation"; public static final ParseField ILM_OPERATION_MODE_FIELD = new ParseField("ilm_operation_mode"); public static final ParseField SLM_OPERATION_MODE_FIELD = new ParseField("slm_operation_mode"); @@ -69,8 +70,8 @@ public LifecycleOperationMetadata(StreamInput in) throws IOException { */ @SuppressWarnings("deprecated") public static OperationMode currentILMMode(final ClusterState state) { - IndexLifecycleMetadata oldMetadata = state.metadata().custom(IndexLifecycleMetadata.TYPE); - LifecycleOperationMetadata currentMetadata = state.metadata().custom(LifecycleOperationMetadata.TYPE); + IndexLifecycleMetadata oldMetadata = state.metadata().section(IndexLifecycleMetadata.TYPE); + LifecycleOperationMetadata currentMetadata = state.metadata().section(LifecycleOperationMetadata.TYPE); return Optional.ofNullable(currentMetadata) .map(LifecycleOperationMetadata::getILMOperationMode) .orElse( @@ -88,8 +89,8 @@ public static OperationMode currentILMMode(final ClusterState state) { */ @SuppressWarnings("deprecated") public static OperationMode currentSLMMode(final ClusterState state) { - SnapshotLifecycleMetadata oldMetadata = state.metadata().custom(SnapshotLifecycleMetadata.TYPE); - LifecycleOperationMetadata currentMetadata = state.metadata().custom(LifecycleOperationMetadata.TYPE); + SnapshotLifecycleMetadata oldMetadata = state.metadata().section(SnapshotLifecycleMetadata.TYPE); + LifecycleOperationMetadata currentMetadata = state.metadata().section(LifecycleOperationMetadata.TYPE); return Optional.ofNullable(currentMetadata) .map(LifecycleOperationMetadata::getSLMOperationMode) .orElse( @@ -114,7 +115,7 @@ public OperationMode getSLMOperationMode() { } @Override - public Diff diff(Metadata.Custom previousState) { + public Diff diff(MetadataSection previousState) { return new LifecycleOperationMetadata.LifecycleOperationMetadataDiff((LifecycleOperationMetadata) previousState, this); } @@ -164,7 +165,7 @@ public String toString() { return Strings.toString(this, true, true); } - public static class LifecycleOperationMetadataDiff implements NamedDiff { + public static class LifecycleOperationMetadataDiff implements NamedDiff { final OperationMode ilmOperationMode; final OperationMode slmOperationMode; @@ -180,7 +181,7 @@ public LifecycleOperationMetadataDiff(StreamInput in) throws IOException { } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { return new LifecycleOperationMetadata(this.ilmOperationMode, this.slmOperationMode); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java index 1072e6ee4c899..139c72e2c64db 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTask.java @@ -108,7 +108,7 @@ private ClusterState updateILMState(final ClusterState currentState) { return ClusterState.builder(currentState) .metadata( Metadata.builder(currentState.metadata()) - .putCustom(LifecycleOperationMetadata.TYPE, new LifecycleOperationMetadata(newMode, currentSLMMode(currentState))) + .putSection(LifecycleOperationMetadata.TYPE, new LifecycleOperationMetadata(newMode, currentSLMMode(currentState))) ) .build(); } @@ -136,7 +136,7 @@ private ClusterState updateSLMState(final ClusterState currentState) { return 
ClusterState.builder(currentState) .metadata( Metadata.builder(currentState.metadata()) - .putCustom(LifecycleOperationMetadata.TYPE, new LifecycleOperationMetadata(currentILMMode(currentState), newMode)) + .putSection(LifecycleOperationMetadata.TYPE, new LifecycleOperationMetadata(currentILMMode(currentState), newMode)) ) .build(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStep.java index d900f168be06c..ada5ae9628506 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStep.java @@ -11,7 +11,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction; import org.elasticsearch.xpack.core.ccr.action.ShardFollowTask; @@ -32,13 +32,14 @@ public boolean isRetryable() { @Override void innerPerformAction(String followerIndex, ClusterState currentClusterState, ActionListener listener) { - PersistentTasksCustomMetadata persistentTasksMetadata = currentClusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasksMetadata = currentClusterState.metadata() + .section(PersistentTasksMetadataSection.TYPE); if (persistentTasksMetadata == null) { listener.onResponse(null); return; } - List> shardFollowTasks = persistentTasksMetadata.tasks() + List> shardFollowTasks = persistentTasksMetadata.tasks() .stream() .filter(persistentTask -> ShardFollowTask.NAME.equals(persistentTask.getTaskName())) .filter(persistentTask -> { diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java index 7ce81fa90a557..6c80cb137c267 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStep.java @@ -68,7 +68,7 @@ public void evaluateCondition(Metadata metadata, Index index, Listener listener, return; } - SnapshotLifecycleMetadata snapMeta = metadata.custom(SnapshotLifecycleMetadata.TYPE); + SnapshotLifecycleMetadata snapMeta = metadata.section(SnapshotLifecycleMetadata.TYPE); if (snapMeta == null || snapMeta.getSnapshotConfigurations().containsKey(policy) == false) { listener.onFailure(error(POLICY_NOT_FOUND_MESSAGE, policy)); return; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java index e791f5f474f5c..27f828c69b9cb 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlMetadata.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; 
import org.elasticsearch.common.io.stream.StreamInput; @@ -34,7 +35,7 @@ import java.util.Objects; import java.util.SortedMap; -public class MlMetadata implements Metadata.Custom { +public class MlMetadata implements MetadataSection { public static final String TYPE = "ml"; public static final ParseField UPGRADE_MODE = new ParseField("upgrade_mode"); @@ -81,7 +82,7 @@ public EnumSet context() { } @Override - public Diff diff(Metadata.Custom previousState) { + public Diff diff(MetadataSection previousState) { return new MlMetadataDiff((MlMetadata) previousState, this); } @@ -128,7 +129,7 @@ public Iterator toXContentChunked(ToXContent.Params ignore ); } - public static class MlMetadataDiff implements NamedDiff { + public static class MlMetadataDiff implements NamedDiff { final boolean upgradeMode; final boolean resetMode; @@ -158,7 +159,7 @@ public MlMetadataDiff(StreamInput in) throws IOException { * @return The new ML metadata. */ @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { return new MlMetadata(upgradeMode, resetMode); } @@ -245,7 +246,7 @@ public MlMetadata build() { } public static MlMetadata getMlMetadata(ClusterState state) { - MlMetadata mlMetadata = (state == null) ? null : state.getMetadata().custom(TYPE); + MlMetadata mlMetadata = (state == null) ? null : state.getMetadata().section(TYPE); if (mlMetadata == null) { return EMPTY_METADATA; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java index 6281f656954e5..ef8ff8329809b 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/MlTasks.java @@ -12,7 +12,7 @@ import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import org.elasticsearch.persistent.PersistentTasksClusterService; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xpack.core.ml.datafeed.DatafeedState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState; import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsTaskState; @@ -55,11 +55,11 @@ public final class MlTasks { public static final String JOB_SNAPSHOT_UPGRADE_TASK_ID_PREFIX = "job-snapshot-upgrade-"; private static final String DOWNLOAD_MODEL_TASK_DESCRIPTION_PREFIX = "model_id-"; - public static final PersistentTasksCustomMetadata.Assignment AWAITING_UPGRADE = new PersistentTasksCustomMetadata.Assignment( + public static final PersistentTasksMetadataSection.Assignment AWAITING_UPGRADE = new PersistentTasksMetadataSection.Assignment( null, "persistent task cannot be assigned while upgrade mode is enabled." ); - public static final PersistentTasksCustomMetadata.Assignment RESET_IN_PROGRESS = new PersistentTasksCustomMetadata.Assignment( + public static final PersistentTasksMetadataSection.Assignment RESET_IN_PROGRESS = new PersistentTasksMetadataSection.Assignment( null, "persistent task will not be assigned as a feature reset is in progress." 
); @@ -119,31 +119,34 @@ public static String trainedModelAssignmentTaskDescription(String deploymentId) } @Nullable - public static PersistentTasksCustomMetadata.PersistentTask getJobTask(String jobId, @Nullable PersistentTasksCustomMetadata tasks) { + public static PersistentTasksMetadataSection.PersistentTask getJobTask( + String jobId, + @Nullable PersistentTasksMetadataSection tasks + ) { return tasks == null ? null : tasks.getTask(jobTaskId(jobId)); } @Nullable - public static PersistentTasksCustomMetadata.PersistentTask getDatafeedTask( + public static PersistentTasksMetadataSection.PersistentTask getDatafeedTask( String datafeedId, - @Nullable PersistentTasksCustomMetadata tasks + @Nullable PersistentTasksMetadataSection tasks ) { return tasks == null ? null : tasks.getTask(datafeedTaskId(datafeedId)); } @Nullable - public static PersistentTasksCustomMetadata.PersistentTask getDataFrameAnalyticsTask( + public static PersistentTasksMetadataSection.PersistentTask getDataFrameAnalyticsTask( String analyticsId, - @Nullable PersistentTasksCustomMetadata tasks + @Nullable PersistentTasksMetadataSection tasks ) { return tasks == null ? null : tasks.getTask(dataFrameAnalyticsTaskId(analyticsId)); } @Nullable - public static PersistentTasksCustomMetadata.PersistentTask getSnapshotUpgraderTask( + public static PersistentTasksMetadataSection.PersistentTask getSnapshotUpgraderTask( String jobId, String snapshotId, - @Nullable PersistentTasksCustomMetadata tasks + @Nullable PersistentTasksMetadataSection tasks ) { return tasks == null ? null : tasks.getTask(snapshotUpgradeTaskId(jobId, snapshotId)); } @@ -153,8 +156,8 @@ public static PersistentTasksCustomMetadata.PersistentTask getSnapshotUpgrade * Use {@link #getJobStateModifiedForReassignments} to return a value adjusted to the most * appropriate value following relocations. 
*/ - public static JobState getJobState(String jobId, @Nullable PersistentTasksCustomMetadata tasks) { - PersistentTasksCustomMetadata.PersistentTask task = getJobTask(jobId, tasks); + public static JobState getJobState(String jobId, @Nullable PersistentTasksMetadataSection tasks) { + PersistentTasksMetadataSection.PersistentTask task = getJobTask(jobId, tasks); if (task != null) { JobTaskState jobTaskState = (JobTaskState) task.getState(); if (jobTaskState == null) { @@ -166,11 +169,11 @@ public static JobState getJobState(String jobId, @Nullable PersistentTasksCustom return JobState.CLOSED; } - public static JobState getJobStateModifiedForReassignments(String jobId, @Nullable PersistentTasksCustomMetadata tasks) { + public static JobState getJobStateModifiedForReassignments(String jobId, @Nullable PersistentTasksMetadataSection tasks) { return getJobStateModifiedForReassignments(getJobTask(jobId, tasks)); } - public static JobState getJobStateModifiedForReassignments(@Nullable PersistentTasksCustomMetadata.PersistentTask task) { + public static JobState getJobStateModifiedForReassignments(@Nullable PersistentTasksMetadataSection.PersistentTask task) { if (task == null) { // A closed job has no persistent task return JobState.CLOSED; @@ -196,8 +199,8 @@ public static JobState getJobStateModifiedForReassignments(@Nullable PersistentT return jobState; } - public static Instant getLastJobTaskStateChangeTime(String jobId, @Nullable PersistentTasksCustomMetadata tasks) { - PersistentTasksCustomMetadata.PersistentTask task = getJobTask(jobId, tasks); + public static Instant getLastJobTaskStateChangeTime(String jobId, @Nullable PersistentTasksMetadataSection tasks) { + PersistentTasksMetadataSection.PersistentTask task = getJobTask(jobId, tasks); if (task != null) { JobTaskState jobTaskState = (JobTaskState) task.getState(); if (jobTaskState != null) { @@ -210,12 +213,12 @@ public static Instant getLastJobTaskStateChangeTime(String jobId, @Nullable Pers public static SnapshotUpgradeState getSnapshotUpgradeState( String jobId, String snapshotId, - @Nullable PersistentTasksCustomMetadata tasks + @Nullable PersistentTasksMetadataSection tasks ) { return getSnapshotUpgradeState(getSnapshotUpgraderTask(jobId, snapshotId, tasks)); } - public static SnapshotUpgradeState getSnapshotUpgradeState(@Nullable PersistentTasksCustomMetadata.PersistentTask task) { + public static SnapshotUpgradeState getSnapshotUpgradeState(@Nullable PersistentTasksMetadataSection.PersistentTask task) { if (task == null) { return SnapshotUpgradeState.STOPPED; } @@ -228,12 +231,12 @@ public static SnapshotUpgradeState getSnapshotUpgradeState(@Nullable PersistentT return taskState.getState(); } - public static DatafeedState getDatafeedState(String datafeedId, @Nullable PersistentTasksCustomMetadata tasks) { - PersistentTasksCustomMetadata.PersistentTask task = getDatafeedTask(datafeedId, tasks); + public static DatafeedState getDatafeedState(String datafeedId, @Nullable PersistentTasksMetadataSection tasks) { + PersistentTasksMetadataSection.PersistentTask task = getDatafeedTask(datafeedId, tasks); return getDatafeedState(task); } - public static DatafeedState getDatafeedState(PersistentTasksCustomMetadata.PersistentTask task) { + public static DatafeedState getDatafeedState(PersistentTasksMetadataSection.PersistentTask task) { if (task == null) { // If we haven't started a datafeed then there will be no persistent task, // which is the same as if the datafeed wasn't started @@ -248,12 +251,12 @@ public static DatafeedState 
getDatafeedState(PersistentTasksCustomMetadata.Persi return taskState; } - public static DataFrameAnalyticsState getDataFrameAnalyticsState(String analyticsId, @Nullable PersistentTasksCustomMetadata tasks) { - PersistentTasksCustomMetadata.PersistentTask task = getDataFrameAnalyticsTask(analyticsId, tasks); + public static DataFrameAnalyticsState getDataFrameAnalyticsState(String analyticsId, @Nullable PersistentTasksMetadataSection tasks) { + PersistentTasksMetadataSection.PersistentTask task = getDataFrameAnalyticsTask(analyticsId, tasks); return getDataFrameAnalyticsState(task); } - public static DataFrameAnalyticsState getDataFrameAnalyticsState(@Nullable PersistentTasksCustomMetadata.PersistentTask task) { + public static DataFrameAnalyticsState getDataFrameAnalyticsState(@Nullable PersistentTasksMetadataSection.PersistentTask task) { if (task == null) { return DataFrameAnalyticsState.STOPPED; } @@ -277,8 +280,8 @@ public static DataFrameAnalyticsState getDataFrameAnalyticsState(@Nullable Persi return state; } - public static Instant getLastDataFrameAnalyticsTaskStateChangeTime(String analyticsId, @Nullable PersistentTasksCustomMetadata tasks) { - PersistentTasksCustomMetadata.PersistentTask task = getDataFrameAnalyticsTask(analyticsId, tasks); + public static Instant getLastDataFrameAnalyticsTaskStateChangeTime(String analyticsId, @Nullable PersistentTasksMetadataSection tasks) { + PersistentTasksMetadataSection.PersistentTask task = getDataFrameAnalyticsTask(analyticsId, tasks); if (task != null) { DataFrameAnalyticsTaskState taskState = (DataFrameAnalyticsTaskState) task.getState(); if (taskState != null) { @@ -296,7 +299,7 @@ public static Instant getLastDataFrameAnalyticsTaskStateChangeTime(String analyt * @param tasks Persistent tasks. If null an empty set is returned. 
* @return The job Ids of anomaly detector job tasks */ - public static Set openJobIds(@Nullable PersistentTasksCustomMetadata tasks) { + public static Set openJobIds(@Nullable PersistentTasksMetadataSection tasks) { if (tasks == null) { return Collections.emptySet(); } @@ -304,7 +307,9 @@ public static Set openJobIds(@Nullable PersistentTasksCustomMetadata tas return openJobTasks(tasks).stream().map(t -> t.getId().substring(JOB_TASK_ID_PREFIX.length())).collect(Collectors.toSet()); } - public static Collection> openJobTasks(@Nullable PersistentTasksCustomMetadata tasks) { + public static Collection> openJobTasks( + @Nullable PersistentTasksMetadataSection tasks + ) { if (tasks == null) { return Collections.emptyList(); } @@ -312,8 +317,8 @@ public static Collection> openJo return tasks.findTasks(JOB_TASK_NAME, Predicates.always()); } - public static Collection> datafeedTasksOnNode( - @Nullable PersistentTasksCustomMetadata tasks, + public static Collection> datafeedTasksOnNode( + @Nullable PersistentTasksMetadataSection tasks, String nodeId ) { if (tasks == null) { @@ -323,8 +328,8 @@ public static Collection> datafe return tasks.findTasks(DATAFEED_TASK_NAME, task -> nodeId.equals(task.getExecutorNode())); } - public static Collection> jobTasksOnNode( - @Nullable PersistentTasksCustomMetadata tasks, + public static Collection> jobTasksOnNode( + @Nullable PersistentTasksMetadataSection tasks, String nodeId ) { if (tasks == null) { @@ -334,8 +339,8 @@ public static Collection> jobTas return tasks.findTasks(JOB_TASK_NAME, task -> nodeId.equals(task.getExecutorNode())); } - public static Collection> nonFailedJobTasksOnNode( - @Nullable PersistentTasksCustomMetadata tasks, + public static Collection> nonFailedJobTasksOnNode( + @Nullable PersistentTasksMetadataSection tasks, String nodeId ) { if (tasks == null) { @@ -354,8 +359,8 @@ public static Collection> nonFai }); } - public static Collection> snapshotUpgradeTasks( - @Nullable PersistentTasksCustomMetadata tasks + public static Collection> snapshotUpgradeTasks( + @Nullable PersistentTasksMetadataSection tasks ) { if (tasks == null) { return Collections.emptyList(); @@ -364,8 +369,8 @@ public static Collection> snapsh return tasks.findTasks(JOB_SNAPSHOT_UPGRADE_TASK_NAME, Predicates.always()); } - public static Collection> snapshotUpgradeTasksOnNode( - @Nullable PersistentTasksCustomMetadata tasks, + public static Collection> snapshotUpgradeTasksOnNode( + @Nullable PersistentTasksMetadataSection tasks, String nodeId ) { if (tasks == null) { @@ -375,8 +380,8 @@ public static Collection> snapsh return tasks.findTasks(JOB_SNAPSHOT_UPGRADE_TASK_NAME, task -> nodeId.equals(task.getExecutorNode())); } - public static Collection> nonFailedSnapshotUpgradeTasksOnNode( - @Nullable PersistentTasksCustomMetadata tasks, + public static Collection> nonFailedSnapshotUpgradeTasksOnNode( + @Nullable PersistentTasksMetadataSection tasks, String nodeId ) { if (tasks == null) { @@ -404,7 +409,7 @@ public static Collection> nonFai * @param nodes The cluster nodes * @return The job Ids of tasks to do not have an assignment. 
*/ - public static Set unassignedJobIds(@Nullable PersistentTasksCustomMetadata tasks, DiscoveryNodes nodes) { + public static Set unassignedJobIds(@Nullable PersistentTasksMetadataSection tasks, DiscoveryNodes nodes) { return unassignedJobTasks(tasks, nodes).stream() .map(task -> task.getId().substring(JOB_TASK_ID_PREFIX.length())) .collect(Collectors.toSet()); @@ -412,14 +417,14 @@ public static Set unassignedJobIds(@Nullable PersistentTasksCustomMetada /** * The job tasks that do not have an assignment as determined by - * {@link PersistentTasksClusterService#needsReassignment(PersistentTasksCustomMetadata.Assignment, DiscoveryNodes)} + * {@link PersistentTasksClusterService#needsReassignment(PersistentTasksMetadataSection.Assignment, DiscoveryNodes)} * * @param tasks Persistent tasks. If null an empty set is returned. * @param nodes The cluster nodes * @return Unassigned job tasks */ - public static Collection> unassignedJobTasks( - @Nullable PersistentTasksCustomMetadata tasks, + public static Collection> unassignedJobTasks( + @Nullable PersistentTasksMetadataSection tasks, DiscoveryNodes nodes ) { if (tasks == null) { @@ -435,7 +440,7 @@ public static Collection> unassi * @param tasks Persistent tasks. If null an empty set is returned. * @return The Ids of running datafeed tasks */ - public static Set startedDatafeedIds(@Nullable PersistentTasksCustomMetadata tasks) { + public static Set startedDatafeedIds(@Nullable PersistentTasksMetadataSection tasks) { if (tasks == null) { return Collections.emptySet(); } @@ -454,7 +459,7 @@ public static Set startedDatafeedIds(@Nullable PersistentTasksCustomMeta * @param nodes The cluster nodes * @return The job Ids of tasks to do not have an assignment. */ - public static Set unassignedDatafeedIds(@Nullable PersistentTasksCustomMetadata tasks, DiscoveryNodes nodes) { + public static Set unassignedDatafeedIds(@Nullable PersistentTasksMetadataSection tasks, DiscoveryNodes nodes) { return unassignedDatafeedTasks(tasks, nodes).stream() .map(task -> task.getId().substring(DATAFEED_TASK_ID_PREFIX.length())) @@ -463,14 +468,14 @@ public static Set unassignedDatafeedIds(@Nullable PersistentTasksCustomM /** * The datafeed tasks that do not have an assignment as determined by - * {@link PersistentTasksClusterService#needsReassignment(PersistentTasksCustomMetadata.Assignment, DiscoveryNodes)} + * {@link PersistentTasksClusterService#needsReassignment(PersistentTasksMetadataSection.Assignment, DiscoveryNodes)} * * @param tasks Persistent tasks. If null an empty set is returned. 
* @param nodes The cluster nodes * @return Unassigned datafeed tasks */ - public static Collection> unassignedDatafeedTasks( - @Nullable PersistentTasksCustomMetadata tasks, + public static Collection> unassignedDatafeedTasks( + @Nullable PersistentTasksMetadataSection tasks, DiscoveryNodes nodes ) { if (tasks == null) { @@ -480,7 +485,7 @@ public static Collection> unassi return tasks.findTasks(DATAFEED_TASK_NAME, task -> PersistentTasksClusterService.needsReassignment(task.getAssignment(), nodes)); } - public static MemoryTrackedTaskState getMemoryTrackedTaskState(PersistentTasksCustomMetadata.PersistentTask task) { + public static MemoryTrackedTaskState getMemoryTrackedTaskState(PersistentTasksMetadataSection.PersistentTask task) { String taskName = task.getTaskName(); return switch (taskName) { case JOB_TASK_NAME -> getJobStateModifiedForReassignments(task); @@ -490,7 +495,7 @@ public static MemoryTrackedTaskState getMemoryTrackedTaskState(PersistentTasksCu }; } - public static Set> findMlProcessTasks(@Nullable PersistentTasksCustomMetadata tasks) { + public static Set> findMlProcessTasks(@Nullable PersistentTasksMetadataSection tasks) { if (tasks == null) { return Set.of(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java index fafb9afa99f85..8141fc0ca6c13 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/action/GetDatafeedsStatsAction.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.core.Nullable; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -361,13 +361,13 @@ public Builder setDatafeedRuntimeState(GetDatafeedRunningStateAction.Response da return this; } - public Response build(PersistentTasksCustomMetadata tasksInProgress, ClusterState state) { + public Response build(PersistentTasksMetadataSection tasksInProgress, ClusterState state) { List stats = statsBuilders.stream().map(statsBuilder -> { final String jobId = datafeedToJobId.get(statsBuilder.datafeedId); DatafeedTimingStats timingStats = jobId == null ? 
null : timingStatsMap.getOrDefault(jobId, new DatafeedTimingStats(jobId)); - PersistentTasksCustomMetadata.PersistentTask maybeTask = MlTasks.getDatafeedTask( + PersistentTasksMetadataSection.PersistentTask maybeTask = MlTasks.getDatafeedTask( statsBuilder.datafeedId, tasksInProgress ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsTaskState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsTaskState.java index e61517569445b..1ea5c56a7f961 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsTaskState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/dataframe/DataFrameAnalyticsTaskState.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; @@ -115,7 +115,7 @@ public boolean isFailed() { return DataFrameAnalyticsState.FAILED.equals(state); } - public boolean isStatusStale(PersistentTasksCustomMetadata.PersistentTask task) { + public boolean isStatusStale(PersistentTasksMetadataSection.PersistentTask task) { return allocationId != task.getAllocationId(); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/ModelAliasMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/ModelAliasMetadata.java index 1d6c5e564a442..7e780969de868 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/ModelAliasMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/ModelAliasMetadata.java @@ -15,6 +15,7 @@ import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; @@ -36,18 +37,18 @@ /** * Custom {@link Metadata} implementation for storing a map of model aliases that point to model IDs */ -public class ModelAliasMetadata implements Metadata.Custom { +public class ModelAliasMetadata implements MetadataSection { public static final String NAME = "trained_model_alias"; public static final ModelAliasMetadata EMPTY = new ModelAliasMetadata(new HashMap<>()); public static ModelAliasMetadata fromState(ClusterState cs) { - ModelAliasMetadata modelAliasMetadata = cs.metadata().custom(NAME); + ModelAliasMetadata modelAliasMetadata = cs.metadata().section(NAME); return modelAliasMetadata == null ? 
EMPTY : modelAliasMetadata; } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { return new ModelAliasMetadataDiff(in); } @@ -97,7 +98,7 @@ public Iterator toXContentChunked(ToXContent.Params ignore } @Override - public Diff diff(Metadata.Custom previousState) { + public Diff diff(MetadataSection previousState) { return new ModelAliasMetadataDiff((ModelAliasMetadata) previousState, this); } @@ -129,7 +130,7 @@ public String getModelId(String modelAlias) { return entry.modelId; } - static class ModelAliasMetadataDiff implements NamedDiff { + static class ModelAliasMetadataDiff implements NamedDiff { final Diff> modelAliasesDiff; @@ -147,7 +148,7 @@ static class ModelAliasMetadataDiff implements NamedDiff { } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { return new ModelAliasMetadata(modelAliasesDiff.apply(((ModelAliasMetadata) part).modelAliases)); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelCacheMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelCacheMetadata.java index 35c6bf96a09e1..f1e8ee1bab387 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelCacheMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/TrainedModelCacheMetadata.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -26,7 +27,7 @@ import java.util.Iterator; import java.util.Objects; -public class TrainedModelCacheMetadata extends AbstractNamedDiffable implements Metadata.Custom { +public class TrainedModelCacheMetadata extends AbstractNamedDiffable implements MetadataSection { public static final String NAME = "trained_model_cache_metadata"; public static final TrainedModelCacheMetadata EMPTY = new TrainedModelCacheMetadata(0L); private static final ParseField VERSION_FIELD = new ParseField("version"); @@ -47,12 +48,12 @@ public static TrainedModelCacheMetadata fromXContent(XContentParser parser) { } public static TrainedModelCacheMetadata fromState(ClusterState clusterState) { - TrainedModelCacheMetadata cacheMetadata = clusterState.getMetadata().custom(NAME); + TrainedModelCacheMetadata cacheMetadata = clusterState.getMetadata().section(NAME); return cacheMetadata == null ? 
EMPTY : cacheMetadata; } - public static NamedDiff readDiffFrom(StreamInput streamInput) throws IOException { - return readDiffFrom(Metadata.Custom.class, NAME, streamInput); + public static NamedDiff readDiffFrom(StreamInput streamInput) throws IOException { + return readDiffFrom(MetadataSection.class, NAME, streamInput); } private final long version; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentMetadata.java index 36fec9ec7b243..bb219696aae66 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/inference/assignment/TrainedModelAssignmentMetadata.java @@ -17,6 +17,7 @@ import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -39,7 +40,7 @@ import static org.elasticsearch.cluster.metadata.Metadata.ALL_CONTEXTS; -public class TrainedModelAssignmentMetadata implements Metadata.Custom { +public class TrainedModelAssignmentMetadata implements MetadataSection { private static final TrainedModelAssignmentMetadata EMPTY = new TrainedModelAssignmentMetadata(Collections.emptyMap()); public static final String DEPRECATED_NAME = "trained_model_allocation"; @@ -59,11 +60,11 @@ public static TrainedModelAssignmentMetadata fromStreamOld(StreamInput input) th return new TrainedModelAssignmentMetadata(input, DEPRECATED_NAME); } - public static NamedDiff readDiffFrom(StreamInput in) throws IOException { + public static NamedDiff readDiffFrom(StreamInput in) throws IOException { return new TrainedModelAssignmentMetadata.TrainedModeAssignmentDiff(in); } - public static NamedDiff readDiffFromOld(StreamInput in) throws IOException { + public static NamedDiff readDiffFromOld(StreamInput in) throws IOException { return new TrainedModelAssignmentMetadata.TrainedModeAssignmentDiff(in, DEPRECATED_NAME); } @@ -72,9 +73,9 @@ public static Builder builder(ClusterState clusterState) { } public static TrainedModelAssignmentMetadata fromState(ClusterState clusterState) { - TrainedModelAssignmentMetadata trainedModelAssignmentMetadata = clusterState.getMetadata().custom(NAME); + TrainedModelAssignmentMetadata trainedModelAssignmentMetadata = clusterState.getMetadata().section(NAME); if (trainedModelAssignmentMetadata == null) { - trainedModelAssignmentMetadata = clusterState.getMetadata().custom(DEPRECATED_NAME); + trainedModelAssignmentMetadata = clusterState.getMetadata().section(DEPRECATED_NAME); } return trainedModelAssignmentMetadata == null ? 
EMPTY : trainedModelAssignmentMetadata; } @@ -139,7 +140,7 @@ public Iterator toXContentChunked(ToXContent.Params ignore } @Override - public Diff diff(Metadata.Custom previousState) { + public Diff diff(MetadataSection previousState) { return new TrainedModeAssignmentDiff((TrainedModelAssignmentMetadata) previousState, this); } @@ -267,7 +268,7 @@ private TrainedModelAssignmentMetadata build(String writeableName) { } } - public static class TrainedModeAssignmentDiff implements NamedDiff { + public static class TrainedModeAssignmentDiff implements NamedDiff { private final Diff> modelRoutingEntries; private final String writeableName; @@ -306,7 +307,7 @@ private TrainedModeAssignmentDiff(final StreamInput in, String writeableName) th } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { return new TrainedModelAssignmentMetadata( new TreeMap<>(modelRoutingEntries.apply(((TrainedModelAssignmentMetadata) part).deploymentRoutingEntries)), writeableName diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java index 2d03d4273013d..237dec800a765 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/job/config/JobTaskState.java @@ -11,7 +11,7 @@ import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.core.Nullable; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.xcontent.ConstructingObjectParser; import org.elasticsearch.xcontent.ObjectParser; import org.elasticsearch.xcontent.ParseField; diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java index 1be495a8a82f5..c4c357dc8cb00 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractor.java @@ -46,7 +46,7 @@ public static int countInferenceProcessors(ClusterState state) { if (metadata == null) { return 0; } - IngestMetadata ingestMetadata = metadata.custom(IngestMetadata.TYPE); + IngestMetadata ingestMetadata = metadata.section(IngestMetadata.TYPE); if (ingestMetadata == null) { return 0; } @@ -110,7 +110,7 @@ public static Map> pipelineIdsByResource(ClusterState state, if (metadata == null) { return pipelineIdsByModelIds; } - IngestMetadata ingestMetadata = metadata.custom(IngestMetadata.TYPE); + IngestMetadata ingestMetadata = metadata.section(IngestMetadata.TYPE); if (ingestMetadata == null) { return pipelineIdsByModelIds; } @@ -142,7 +142,7 @@ public static Set pipelineIdsForResource(ClusterState state, Set if (metadata == null) { return pipelineIds; } - IngestMetadata ingestMetadata = metadata.custom(IngestMetadata.TYPE); + IngestMetadata ingestMetadata = metadata.section(IngestMetadata.TYPE); if (ingestMetadata == null) { return pipelineIds; } diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java index da6ff6ad24c34..f1aa53a387ce5 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/security/authz/RoleMappingMetadata.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -34,7 +35,7 @@ import static org.elasticsearch.cluster.metadata.Metadata.ALL_CONTEXTS; import static org.elasticsearch.xcontent.ConstructingObjectParser.constructorArg; -public final class RoleMappingMetadata extends AbstractNamedDiffable implements Metadata.Custom { +public final class RoleMappingMetadata extends AbstractNamedDiffable implements MetadataSection { public static final String TYPE = "role_mappings"; @@ -57,7 +58,7 @@ public final class RoleMappingMetadata extends AbstractNamedDiffable roleMappings; @@ -81,14 +82,14 @@ public boolean isEmpty() { public ClusterState updateClusterState(ClusterState clusterState) { if (isEmpty()) { // prefer no role mapping custom metadata to the empty role mapping metadata - return clusterState.copyAndUpdateMetadata(b -> b.removeCustom(RoleMappingMetadata.TYPE)); + return clusterState.copyAndUpdateMetadata(b -> b.removeSection(RoleMappingMetadata.TYPE)); } else { - return clusterState.copyAndUpdateMetadata(b -> b.putCustom(RoleMappingMetadata.TYPE, this)); + return clusterState.copyAndUpdateMetadata(b -> b.putSection(RoleMappingMetadata.TYPE, this)); } } - public static NamedDiff readDiffFrom(StreamInput streamInput) throws IOException { - return readDiffFrom(Metadata.Custom.class, TYPE, streamInput); + public static NamedDiff readDiffFrom(StreamInput streamInput) throws IOException { + return readDiffFrom(MetadataSection.class, TYPE, streamInput); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java index 9610f70689897..b6dcb060dc388 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecycleMetadata.java @@ -14,6 +14,7 @@ import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.SimpleDiffable; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Iterators; import org.elasticsearch.common.io.stream.StreamInput; @@ -41,7 +42,7 @@ * Custom cluster state metadata that stores all the snapshot lifecycle * policies and their associated metadata */ -public class SnapshotLifecycleMetadata implements Metadata.Custom { +public class SnapshotLifecycleMetadata implements MetadataSection { public static final String TYPE = "snapshot_lifecycle"; @@ -121,7 +122,7 @@ public EnumSet context() { } @Override - public Diff diff(Metadata.Custom previousState) { + public 
Diff diff(MetadataSection previousState) { return new SnapshotLifecycleMetadataDiff((SnapshotLifecycleMetadata) previousState, this); } @@ -178,7 +179,7 @@ public boolean equals(Object obj) { && this.slmStats.equals(other.slmStats); } - public static class SnapshotLifecycleMetadataDiff implements NamedDiff { + public static class SnapshotLifecycleMetadataDiff implements NamedDiff { final Diff> lifecycles; final OperationMode operationMode; @@ -206,7 +207,7 @@ public SnapshotLifecycleMetadataDiff(StreamInput in) throws IOException { } @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { TreeMap newLifecycles = new TreeMap<>( lifecycles.apply(((SnapshotLifecycleMetadata) part).snapshotConfigurations) ); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java index 87092c45bf032..710473709f342 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistry.java @@ -366,7 +366,7 @@ private static boolean templateDependenciesSatisfied(ClusterState state, Compone if (settings == null) { return true; } - IngestMetadata ingestMetadata = state.metadata().custom(IngestMetadata.TYPE); + IngestMetadata ingestMetadata = state.metadata().section(IngestMetadata.TYPE); String defaultPipeline = settings.get("index.default_pipeline"); if (defaultPipeline != null) { if (ingestMetadata == null || ingestMetadata.getPipelines().containsKey(defaultPipeline) == false) { @@ -580,7 +580,7 @@ private void addIndexLifecyclePoliciesIfMissing(ClusterState state) { logger.trace("running in data stream lifecycle only mode. 
skipping the installation of ILM policies."); return; } - IndexLifecycleMetadata metadata = state.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata metadata = state.metadata().section(IndexLifecycleMetadata.TYPE); for (LifecyclePolicy policy : getLifecyclePolicies()) { final AtomicBoolean creationCheck = policyCreationsInProgress.computeIfAbsent( policy.getName(), @@ -717,7 +717,7 @@ private static boolean pipelineDependenciesExist(ClusterState state, List maybeMeta = Optional.ofNullable(state.metadata().custom(IngestMetadata.TYPE)); + Optional maybeMeta = Optional.ofNullable(state.metadata().section(IngestMetadata.TYPE)); return maybeMeta.map(ingestMetadata -> ingestMetadata.getPipelines().get(pipelineId)).orElse(null); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java index e54cf2edb2690..0b01c58fac417 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/transform/TransformMetadata.java @@ -13,6 +13,7 @@ import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; @@ -27,7 +28,7 @@ import java.util.Iterator; import java.util.Objects; -public class TransformMetadata implements Metadata.Custom { +public class TransformMetadata implements MetadataSection { public static final String TYPE = "transform"; public static final ParseField RESET_MODE = new ParseField("reset_mode"); @@ -69,7 +70,7 @@ public EnumSet context() { } @Override - public Diff diff(Metadata.Custom previousState) { + public Diff diff(MetadataSection previousState) { return new TransformMetadata.TransformMetadataDiff((TransformMetadata) previousState, this); } @@ -87,7 +88,7 @@ public Iterator toXContentChunked(ToXContent.Params ignore return ChunkedToXContentHelper.field(RESET_MODE.getPreferredName(), resetMode); } - public static class TransformMetadataDiff implements NamedDiff { + public static class TransformMetadataDiff implements NamedDiff { final boolean resetMode; @@ -105,7 +106,7 @@ public TransformMetadataDiff(StreamInput in) throws IOException { * @return The new transform metadata. */ @Override - public Metadata.Custom apply(Metadata.Custom part) { + public MetadataSection apply(MetadataSection part) { return new TransformMetadata(resetMode); } @@ -170,7 +171,7 @@ public TransformMetadata build() { } public static TransformMetadata getTransformMetadata(ClusterState state) { - TransformMetadata TransformMetadata = (state == null) ? null : state.getMetadata().custom(TYPE); + TransformMetadata TransformMetadata = (state == null) ? 
null : state.getMetadata().section(TYPE); if (TransformMetadata == null) { return EMPTY_METADATA; } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetadata.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetadata.java index 994bb8b75178e..a99e1ed288c8c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetadata.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/watcher/WatcherMetadata.java @@ -11,6 +11,7 @@ import org.elasticsearch.cluster.AbstractNamedDiffable; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.xcontent.ChunkedToXContentHelper; @@ -23,7 +24,7 @@ import java.util.Iterator; import java.util.Objects; -public class WatcherMetadata extends AbstractNamedDiffable implements Metadata.Custom { +public class WatcherMetadata extends AbstractNamedDiffable implements MetadataSection { public static final String TYPE = "watcher"; @@ -56,8 +57,8 @@ public WatcherMetadata(StreamInput streamInput) throws IOException { this(streamInput.readBoolean()); } - public static NamedDiff readDiffFrom(StreamInput streamInput) throws IOException { - return readDiffFrom(Metadata.Custom.class, TYPE, streamInput); + public static NamedDiff readDiffFrom(StreamInput streamInput) throws IOException { + return readDiffFrom(MetadataSection.class, TYPE, streamInput); } @Override @@ -85,7 +86,7 @@ public int hashCode() { return Objects.hash(manuallyStopped); } - public static Metadata.Custom fromXContent(XContentParser parser) throws IOException { + public static MetadataSection fromXContent(XContentParser parser) throws IOException { XContentParser.Token token; Boolean manuallyStopped = null; String currentFieldName = null; diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractClusterStateLicenseServiceTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractClusterStateLicenseServiceTestCase.java index 27e5c1213f1f9..c20300026d7fb 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractClusterStateLicenseServiceTestCase.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractClusterStateLicenseServiceTestCase.java @@ -78,7 +78,7 @@ protected void setInitialState(License license, XPackLicenseState licenseState, final ClusterBlocks noBlock = ClusterBlocks.builder().build(); when(state.blocks()).thenReturn(noBlock); Metadata metadata = mock(Metadata.class); - when(metadata.custom(LicensesMetadata.TYPE)).thenReturn(new LicensesMetadata(license, null)); + when(metadata.section(LicensesMetadata.TYPE)).thenReturn(new LicensesMetadata(license, null)); when(state.metadata()).thenReturn(metadata); final DiscoveryNode mockNode = getLocalNode(); when(discoveryNodes.getMasterNode()).thenReturn(mockNode); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java index a09f7d5ca3f52..14189f25e4cb6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java +++ 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/AbstractLicensesIntegrationTestCase.java @@ -50,7 +50,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) @Override public ClusterState execute(ClusterState currentState) throws Exception { Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); - mdBuilder.putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, null)); + mdBuilder.putSection(LicensesMetadata.TYPE, new LicensesMetadata(license, null)); return ClusterState.builder(currentState).metadata(mdBuilder).build(); } @@ -78,7 +78,7 @@ public void clusterStateProcessed(ClusterState oldState, ClusterState newState) @Override public ClusterState execute(ClusterState currentState) throws Exception { Metadata.Builder mdBuilder = Metadata.builder(currentState.metadata()); - mdBuilder.removeCustom(LicensesMetadata.TYPE); + mdBuilder.removeSection(LicensesMetadata.TYPE); return ClusterState.builder(currentState).metadata(mdBuilder).build(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java index c0c7c5c59d24b..91c296eeceae6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/ClusterStateLicenseServiceTests.java @@ -221,7 +221,7 @@ public void testStartBasicStartsNewLicenseIfFieldsDifferent() throws Exception { ); License oldLicense = sign(buildLicense(License.LicenseType.BASIC, TimeValue.timeValueDays(randomIntBetween(1, 100)), maxNodes)); ClusterState oldState = ClusterState.EMPTY_STATE.copyAndUpdateMetadata( - m -> m.putCustom(LicensesMetadata.TYPE, new LicensesMetadata(oldLicense, null)) + m -> m.putSection(LicensesMetadata.TYPE, new LicensesMetadata(oldLicense, null)) ); ClusterState updatedState = taskExecutorCaptor.getValue() diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseClusterChangeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseClusterChangeTests.java index 5eba6ff46d093..70f4cdf867391 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseClusterChangeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseClusterChangeTests.java @@ -46,7 +46,7 @@ public void teardown() { public void testNotificationOnNewLicense() throws Exception { ClusterState oldState = ClusterState.builder(new ClusterName("a")).build(); final License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); - Metadata metadata = Metadata.builder().putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, null)).build(); + Metadata metadata = Metadata.builder().putSection(LicensesMetadata.TYPE, new LicensesMetadata(license, null)).build(); ClusterState newState = ClusterState.builder(new ClusterName("a")).metadata(metadata).build(); licenseService.clusterChanged(new ClusterChangedEvent("simulated", newState, oldState)); assertThat(licenseState.activeUpdates.size(), equalTo(1)); @@ -55,7 +55,7 @@ public void testNotificationOnNewLicense() throws Exception { public void testNoNotificationOnExistingLicense() throws Exception { final License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(24)); - Metadata metadata = Metadata.builder().putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, null)).build(); + Metadata 
metadata = Metadata.builder().putSection(LicensesMetadata.TYPE, new LicensesMetadata(license, null)).build(); ClusterState newState = ClusterState.builder(new ClusterName("a")).metadata(metadata).build(); ClusterState oldState = ClusterState.builder(newState).build(); licenseService.clusterChanged(new ClusterChangedEvent("simulated", newState, oldState)); @@ -74,7 +74,7 @@ public void testSelfGeneratedLicenseGeneration() throws Exception { ArgumentCaptor stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); verify(clusterService, times(1)).submitUnbatchedStateUpdateTask(any(), stateUpdater.capture()); ClusterState stateWithLicense = stateUpdater.getValue().execute(newState); - LicensesMetadata licenseMetadata = stateWithLicense.metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licenseMetadata = stateWithLicense.metadata().section(LicensesMetadata.TYPE); assertNotNull(licenseMetadata); assertNotNull(licenseMetadata.getLicense()); assertEquals(licenseType, licenseMetadata.getLicense().type()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java index 68dc9f7c4df38..e67b660632cb9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicenseRegistrationTests.java @@ -33,7 +33,7 @@ public void testSelfGeneratedTrialLicense() throws Exception { ArgumentCaptor stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); verify(clusterService, Mockito.times(1)).submitUnbatchedStateUpdateTask(any(), stateUpdater.capture()); ClusterState stateWithLicense = stateUpdater.getValue().execute(state); - LicensesMetadata licenseMetadata = stateWithLicense.metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licenseMetadata = stateWithLicense.metadata().section(LicensesMetadata.TYPE); assertNotNull(licenseMetadata); assertNotNull(licenseMetadata.getLicense()); assertFalse(licenseMetadata.isEligibleForTrial()); @@ -54,7 +54,7 @@ public void testSelfGeneratedBasicLicense() throws Exception { ArgumentCaptor stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); verify(clusterService, Mockito.times(1)).submitUnbatchedStateUpdateTask(any(), stateUpdater.capture()); ClusterState stateWithLicense = stateUpdater.getValue().execute(state); - LicensesMetadata licenseMetadata = stateWithLicense.metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licenseMetadata = stateWithLicense.metadata().section(LicensesMetadata.TYPE); assertNotNull(licenseMetadata); assertNotNull(licenseMetadata.getLicense()); assertTrue(licenseMetadata.isEligibleForTrial()); @@ -83,12 +83,12 @@ public void testNonSelfGeneratedBasicLicenseIsReplaced() throws Exception { licenseService.start(); Metadata.Builder mdBuilder = Metadata.builder(); - mdBuilder.putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, null)); + mdBuilder.putSection(LicensesMetadata.TYPE, new LicensesMetadata(license, null)); ClusterState state = ClusterState.builder(new ClusterName("a")).metadata(mdBuilder.build()).build(); ArgumentCaptor stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); verify(clusterService, Mockito.times(1)).submitUnbatchedStateUpdateTask(any(), stateUpdater.capture()); ClusterState stateWithLicense = stateUpdater.getValue().execute(state); - LicensesMetadata licenseMetadata = 
stateWithLicense.metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licenseMetadata = stateWithLicense.metadata().section(LicensesMetadata.TYPE); assertNotNull(licenseMetadata); assertNotNull(licenseMetadata.getLicense()); assertTrue(licenseMetadata.isEligibleForTrial()); @@ -115,12 +115,12 @@ public void testExpiredSelfGeneratedBasicLicenseIsExtended() throws Exception { licenseService.start(); Metadata.Builder mdBuilder = Metadata.builder(); - mdBuilder.putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, null)); + mdBuilder.putSection(LicensesMetadata.TYPE, new LicensesMetadata(license, null)); ClusterState state = ClusterState.builder(new ClusterName("a")).metadata(mdBuilder.build()).build(); ArgumentCaptor stateUpdater = ArgumentCaptor.forClass(ClusterStateUpdateTask.class); verify(clusterService, Mockito.times(1)).submitUnbatchedStateUpdateTask(any(), stateUpdater.capture()); ClusterState stateWithLicense = stateUpdater.getValue().execute(state); - LicensesMetadata licenseMetadata = stateWithLicense.metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licenseMetadata = stateWithLicense.metadata().section(LicensesMetadata.TYPE); assertNotNull(licenseMetadata); assertNotNull(licenseMetadata.getLicense()); assertTrue(licenseMetadata.isEligibleForTrial()); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java index 0ea55617d86b2..af69cc8705f6d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesManagerServiceTests.java @@ -53,7 +53,7 @@ protected boolean resetNodeAfterTest() { @Before public void waitForTrialLicenseToBeGenerated() throws Exception { - assertBusy(() -> assertNotNull(getInstanceFromNode(ClusterService.class).state().metadata().custom(LicensesMetadata.TYPE))); + assertBusy(() -> assertNotNull(getInstanceFromNode(ClusterService.class).state().metadata().section(LicensesMetadata.TYPE))); } public void testStoreAndGetLicenses() throws Exception { @@ -65,7 +65,7 @@ public void testStoreAndGetLicenses() throws Exception { TestUtils.registerAndAckSignedLicenses(licenseService, silverLicense, LicensesStatus.VALID); License platinumLicense = TestUtils.generateSignedLicense("platinum", TimeValue.timeValueHours(1)); TestUtils.registerAndAckSignedLicenses(licenseService, platinumLicense, LicensesStatus.VALID); - LicensesMetadata licensesMetadata = clusterService.state().metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licensesMetadata = clusterService.state().metadata().section(LicensesMetadata.TYPE); assertThat(licensesMetadata.getLicense(), equalTo(platinumLicense)); final License getLicenses = licenseService.getLicense(); assertThat(getLicenses, equalTo(platinumLicense)); @@ -79,13 +79,13 @@ public void testEffectiveLicenses() throws Exception { License goldLicense = TestUtils.generateSignedLicense("gold", TimeValue.timeValueSeconds(5)); // put gold license TestUtils.registerAndAckSignedLicenses(licenseService, goldLicense, LicensesStatus.VALID); - LicensesMetadata licensesMetadata = clusterService.state().metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licensesMetadata = clusterService.state().metadata().section(LicensesMetadata.TYPE); assertThat(licenseService.getLicenseFromLicensesMetadata(licensesMetadata), equalTo(goldLicense)); License platinumLicense = 
TestUtils.generateSignedLicense("platinum", TimeValue.timeValueSeconds(3)); // put platinum license TestUtils.registerAndAckSignedLicenses(licenseService, platinumLicense, LicensesStatus.VALID); - licensesMetadata = clusterService.state().metadata().custom(LicensesMetadata.TYPE); + licensesMetadata = clusterService.state().metadata().section(LicensesMetadata.TYPE); assertThat(licenseService.getLicenseFromLicensesMetadata(licensesMetadata), equalTo(platinumLicense)); } @@ -104,7 +104,7 @@ public void testInvalidLicenseStorage() throws Exception { TestUtils.registerAndAckSignedLicenses(licenseService, tamperedLicense, LicensesStatus.INVALID); // ensure that the invalid license never made it to cluster state - LicensesMetadata licensesMetadata = clusterService.state().metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licensesMetadata = clusterService.state().metadata().section(LicensesMetadata.TYPE); assertThat(licensesMetadata.getLicense(), not(equalTo(tamperedLicense))); } @@ -115,12 +115,12 @@ public void testRemoveLicenses() throws Exception { // generate signed licenses License license = TestUtils.generateSignedLicense(TimeValue.timeValueHours(1)); TestUtils.registerAndAckSignedLicenses(licenseService, license, LicensesStatus.VALID); - LicensesMetadata licensesMetadata = clusterService.state().metadata().custom(LicensesMetadata.TYPE); + LicensesMetadata licensesMetadata = clusterService.state().metadata().section(LicensesMetadata.TYPE); assertThat(licensesMetadata.getLicense(), not(LicensesMetadata.LICENSE_TOMBSTONE)); // remove signed licenses removeAndAckSignedLicenses(licenseService); - licensesMetadata = clusterService.state().metadata().custom(LicensesMetadata.TYPE); + licensesMetadata = clusterService.state().metadata().section(LicensesMetadata.TYPE); assertTrue(License.LicenseType.isBasic(licensesMetadata.getLicense().type())); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java index be43705984435..9f6a961a56a03 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/LicensesMetadataSerializationTests.java @@ -69,11 +69,11 @@ public void testLicenseMetadataParsingDoesNotSwallowOtherMetadata() throws Excep RepositoriesMetadata repositoriesMetadata = new RepositoriesMetadata(Collections.singletonList(repositoryMetadata)); final Metadata.Builder metadataBuilder = Metadata.builder(); if (randomBoolean()) { // random order of insertion - metadataBuilder.putCustom(licensesMetadata.getWriteableName(), licensesMetadata); - metadataBuilder.putCustom(repositoriesMetadata.getWriteableName(), repositoriesMetadata); + metadataBuilder.putSection(licensesMetadata.getWriteableName(), licensesMetadata); + metadataBuilder.putSection(repositoriesMetadata.getWriteableName(), repositoriesMetadata); } else { - metadataBuilder.putCustom(repositoriesMetadata.getWriteableName(), repositoriesMetadata); - metadataBuilder.putCustom(licensesMetadata.getWriteableName(), licensesMetadata); + metadataBuilder.putSection(repositoriesMetadata.getWriteableName(), repositoriesMetadata); + metadataBuilder.putSection(licensesMetadata.getWriteableName(), licensesMetadata); } // serialize metadata XContentBuilder builder = XContentFactory.jsonBuilder(); @@ -84,8 +84,8 @@ public void 
testLicenseMetadataParsingDoesNotSwallowOtherMetadata() throws Excep // deserialize metadata again Metadata metadata = Metadata.Builder.fromXContent(createParser(builder)); // check that custom metadata still present - assertThat(metadata.custom(licensesMetadata.getWriteableName()), notNullValue()); - assertThat(metadata.custom(repositoriesMetadata.getWriteableName()), notNullValue()); + assertThat(metadata.section(licensesMetadata.getWriteableName()), notNullValue()); + assertThat(metadata.section(repositoriesMetadata.getWriteableName()), notNullValue()); } public void testXContentSerializationOneTrial() throws Exception { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java index 14ec60dc931d3..6b11a37b4aa5d 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/license/TestUtils.java @@ -448,7 +448,7 @@ public static XPackLicenseState newTestLicenseState() { } public static void putLicense(Metadata.Builder builder, License license) { - builder.putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, null)); + builder.putSection(LicensesMetadata.TYPE, new LicensesMetadata(license, null)); } public static MockLicenseState newMockLicenceState() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java index c42f0c008548c..ed761c9ceebf9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/cluster/routing/allocation/DataTierAllocationDeciderTests.java @@ -888,7 +888,7 @@ private ClusterState clusterStateWithIndexAndNodes(String tierPreference, Discov ) ); if (desiredNodes != null) { - metadata.putCustom(DesiredNodesMetadata.TYPE, new DesiredNodesMetadata(desiredNodes)); + metadata.putSection(DesiredNodesMetadata.TYPE, new DesiredNodesMetadata(desiredNodes)); } return ClusterState.builder(new ClusterName("test")).nodes(discoveryNodes).metadata(metadata).build(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java index e46c40ca96ff7..9aa56ce7ce276 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CheckShrinkReadyStepTests.java @@ -455,7 +455,7 @@ public void testStepCompletableIfAllShardsActive() { .metadata( Metadata.builder() .indices(indices) - .putCustom( + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Collections.singletonMap( @@ -534,7 +534,7 @@ public void testStepBecomesUncompletable() { .metadata( Metadata.builder() .indices(indices) - .putCustom( + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Collections.singletonMap( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java index ce8cd5ae46ace..3bc21d574d759 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java @@ -83,7 +83,7 @@ private void testPerformAction(String policyName, String expectedPolicyName) { .metadata( Metadata.builder() .put(indexMetadata, false) - .putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(repo))) + .putSection(RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(repo))) .build() ) .build(); @@ -123,7 +123,9 @@ public void testPerformActionRejectsNonexistentRepository() { GenerateSnapshotNameStep generateSnapshotNameStep = createRandomInstance(); ClusterState clusterState = ClusterState.builder(emptyClusterState()) - .metadata(Metadata.builder().put(indexMetadata, false).putCustom(RepositoriesMetadata.TYPE, RepositoriesMetadata.EMPTY).build()) + .metadata( + Metadata.builder().put(indexMetadata, false).putSection(RepositoriesMetadata.TYPE, RepositoriesMetadata.EMPTY).build() + ) .build(); IllegalStateException illegalStateException = expectThrows( @@ -168,7 +170,7 @@ public void testPerformActionWillOverwriteCachedRepository() { .metadata( Metadata.builder() .put(indexMetadata, false) - .putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(repo))) + .putSection(RepositoriesMetadata.TYPE, new RepositoriesMetadata(Collections.singletonList(repo))) .build() ) .build(); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadataTests.java index 868bbc03b652c..ddae25da7e64a 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadataTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleOperationMetadataTests.java @@ -10,6 +10,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.test.AbstractChunkedSerializingTestCase; import org.elasticsearch.test.TransportVersionUtils; @@ -19,7 +20,7 @@ import static org.hamcrest.Matchers.containsInAnyOrder; -public class LifecycleOperationMetadataTests extends AbstractChunkedSerializingTestCase { +public class LifecycleOperationMetadataTests extends AbstractChunkedSerializingTestCase { @Override protected LifecycleOperationMetadata createTestInstance() { @@ -32,12 +33,12 @@ protected LifecycleOperationMetadata doParseInstance(XContentParser parser) thro } @Override - protected Writeable.Reader instanceReader() { + protected Writeable.Reader instanceReader() { return LifecycleOperationMetadata::new; } @Override - protected Metadata.Custom mutateInstance(Metadata.Custom instance) { + protected MetadataSection mutateInstance(MetadataSection instance) { LifecycleOperationMetadata metadata = (LifecycleOperationMetadata) instance; if (randomBoolean()) { return new LifecycleOperationMetadata( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java index 3efe2dc04ea19..ee0f126d50ec8 100644 --- 
a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyUtilsTests.java @@ -49,7 +49,7 @@ public void testCalculateUsage() { ClusterState state = ClusterState.builder(new ClusterName("mycluster")) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), @@ -70,7 +70,7 @@ public void testCalculateUsage() { ClusterState state = ClusterState.builder(new ClusterName("mycluster")) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), @@ -95,7 +95,7 @@ public void testCalculateUsage() { ClusterState state = ClusterState.builder(new ClusterName("mycluster")) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), @@ -106,7 +106,7 @@ public void testCalculateUsage() { IndexMetadata.builder("myindex") .settings(indexSettings(IndexVersion.current(), 1, 0).put(LifecycleSettings.LIFECYCLE_NAME, "mypolicy")) ) - .putCustom( + .putSection( ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata( Collections.singletonMap( @@ -136,7 +136,7 @@ public void testCalculateUsage() { { Metadata.Builder mBuilder = Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap("mypolicy", LifecyclePolicyMetadataTests.createRandomPolicyMetadata("mypolicy")), @@ -156,7 +156,7 @@ public void testCalculateUsage() { .settings(indexSettings(IndexVersion.current(), 1, 0).put(LifecycleSettings.LIFECYCLE_NAME, "otherpolicy")) ) - .putCustom( + .putSection( ComposableIndexTemplateMetadata.TYPE, new ComposableIndexTemplateMetadata( Collections.singletonMap( diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java index 9871cb79b595b..fc9ee85a54df6 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/OperationModeUpdateTaskTests.java @@ -105,7 +105,7 @@ private OperationMode executeILMUpdate( ); Metadata.Builder metadata = Metadata.builder().persistentSettings(settings(IndexVersion.current()).build()); if (metadataInstalled) { - metadata.customs( + metadata.sections( Map.of(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata, SnapshotLifecycleMetadata.TYPE, snapshotLifecycleMetadata) ); } @@ -117,8 +117,8 @@ private OperationMode executeILMUpdate( } else { assertThat("expected a different state instance but they were the same", state, not(equalTo(newState))); } - LifecycleOperationMetadata newMetadata = newState.metadata().custom(LifecycleOperationMetadata.TYPE); - IndexLifecycleMetadata oldMetadata = newState.metadata().custom(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); + LifecycleOperationMetadata newMetadata = newState.metadata().section(LifecycleOperationMetadata.TYPE); + IndexLifecycleMetadata oldMetadata = 
newState.metadata().section(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); return Optional.ofNullable(newMetadata) .map(LifecycleOperationMetadata::getILMOperationMode) .orElseGet(oldMetadata::getOperationMode); @@ -139,7 +139,7 @@ private OperationMode executeSLMUpdate( ); Metadata.Builder metadata = Metadata.builder().persistentSettings(settings(IndexVersion.current()).build()); if (metadataInstalled) { - metadata.customs( + metadata.sections( Map.of(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata, SnapshotLifecycleMetadata.TYPE, snapshotLifecycleMetadata) ); } @@ -151,8 +151,9 @@ private OperationMode executeSLMUpdate( } else { assertThat(state, not(equalTo(newState))); } - LifecycleOperationMetadata newMetadata = newState.metadata().custom(LifecycleOperationMetadata.TYPE); - SnapshotLifecycleMetadata oldMetadata = newState.metadata().custom(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY); + LifecycleOperationMetadata newMetadata = newState.metadata().section(LifecycleOperationMetadata.TYPE); + SnapshotLifecycleMetadata oldMetadata = newState.metadata() + .section(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY); return Optional.ofNullable(newMetadata) .map(LifecycleOperationMetadata::getSLMOperationMode) .orElseGet(oldMetadata::getOperationMode); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java index 51ebc98176955..3d7e888114d24 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/PauseFollowerIndexStepTests.java @@ -17,7 +17,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xpack.core.ccr.action.PauseFollowAction; import org.elasticsearch.xpack.core.ccr.action.ShardFollowTask; import org.mockito.Mockito; @@ -112,11 +112,11 @@ public final void testNoShardFollowPersistentTasks() throws Exception { .numberOfReplicas(0) .build(); - PersistentTasksCustomMetadata.Builder emptyPersistentTasks = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder emptyPersistentTasks = PersistentTasksMetadataSection.builder(); ClusterState clusterState = ClusterState.builder(new ClusterName("_cluster")) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, emptyPersistentTasks.build()) + .putSection(PersistentTasksMetadataSection.TYPE, emptyPersistentTasks.build()) .put(indexMetadata, false) .build() ) @@ -153,7 +153,7 @@ public final void testNoShardFollowTasksForManagedIndex() throws Exception { } private static ClusterState setupClusterStateWithFollowingIndex(IndexMetadata followerIndex) { - PersistentTasksCustomMetadata.Builder persistentTasks = PersistentTasksCustomMetadata.builder() + PersistentTasksMetadataSection.Builder persistentTasks = PersistentTasksMetadataSection.builder() .addTask( "1", ShardFollowTask.NAME, @@ -178,7 +178,10 @@ private static ClusterState setupClusterStateWithFollowingIndex(IndexMetadata fo return ClusterState.builder(new ClusterName("_cluster")) .metadata( - Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, 
persistentTasks.build()).put(followerIndex, false).build() + Metadata.builder() + .putSection(PersistentTasksMetadataSection.TYPE, persistentTasks.build()) + .put(followerIndex, false) + .build() ) .build(); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java index a33d6e3332a40..55b2525609ba0 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java @@ -214,7 +214,7 @@ public void assertPerformAction( ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap(policyMetadata.getName(), policyMetadata), diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStepTests.java index ed1cb477c30ef..e3ea057f266c9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForSnapshotStepTests.java @@ -123,7 +123,7 @@ public void testSlmPolicyNotExecuted() { .numberOfReplicas(randomIntBetween(0, 5)) .build(); Map indices = Map.of(indexMetadata.getIndex().getName(), indexMetadata); - Metadata.Builder meta = Metadata.builder().indices(indices).putCustom(SnapshotLifecycleMetadata.TYPE, smlMetadata); + Metadata.Builder meta = Metadata.builder().indices(indices).putSection(SnapshotLifecycleMetadata.TYPE, smlMetadata); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(meta).build(); SetOnce isConditionMet = new SetOnce<>(); SetOnce informationContext = new SetOnce<>(); @@ -237,7 +237,7 @@ public void testIndexNotBackedUpYet() { .numberOfReplicas(randomIntBetween(0, 5)) .build(); Map indices = Map.of(indexMetadata.getIndex().getName(), indexMetadata); - Metadata.Builder meta = Metadata.builder().indices(indices).putCustom(SnapshotLifecycleMetadata.TYPE, smlMetadata); + Metadata.Builder meta = Metadata.builder().indices(indices).putSection(SnapshotLifecycleMetadata.TYPE, smlMetadata); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(meta).build(); SetOnce error = new SetOnce<>(); instance.evaluateCondition(clusterState.metadata(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { @@ -301,7 +301,7 @@ private void assertSlmPolicyExecuted( .numberOfReplicas(randomIntBetween(0, 5)) .build(); Map indices = Map.of(indexMetadata.getIndex().getName(), indexMetadata); - Metadata.Builder meta = Metadata.builder().indices(indices).putCustom(SnapshotLifecycleMetadata.TYPE, smlMetadata); + Metadata.Builder meta = Metadata.builder().indices(indices).putSection(SnapshotLifecycleMetadata.TYPE, smlMetadata); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(meta).build(); SetOnce isConditionMet = new SetOnce<>(); SetOnce informationContext = new SetOnce<>(); @@ -358,7 +358,7 @@ public void testNullStartTime() { .numberOfReplicas(randomIntBetween(0, 5)) .build(); Map indices = Map.of(indexMetadata.getIndex().getName(), indexMetadata); - Metadata.Builder meta = Metadata.builder().indices(indices).putCustom(SnapshotLifecycleMetadata.TYPE, smlMetadata); + 
Metadata.Builder meta = Metadata.builder().indices(indices).putSection(SnapshotLifecycleMetadata.TYPE, smlMetadata); ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(meta).build(); SetOnce error = new SetOnce<>(); instance.evaluateCondition(clusterState.metadata(), indexMetadata.getIndex(), new AsyncWaitStep.Listener() { diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlTasksTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlTasksTests.java index c11cefed137e9..ab2265328127f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlTasksTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/MlTasksTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodeUtils; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; @@ -39,7 +39,7 @@ public class MlTasksTests extends ESTestCase { public void testGetJobState() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); // A missing task is a closed job assertEquals(JobState.CLOSED, MlTasks.getJobState("foo", tasksBuilder.build())); // A task with no status is opening @@ -47,7 +47,7 @@ public void testGetJobState() { MlTasks.jobTaskId("foo"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo"), - new PersistentTasksCustomMetadata.Assignment("bar", "test assignment") + new PersistentTasksMetadataSection.Assignment("bar", "test assignment") ); assertEquals(JobState.OPENING, MlTasks.getJobState("foo", tasksBuilder.build())); @@ -63,7 +63,7 @@ public void testGetJobState_GivenNull() { } public void testGetDatefeedState() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); // A missing task is a stopped datafeed assertEquals(DatafeedState.STOPPED, MlTasks.getDatafeedState("foo", tasksBuilder.build())); @@ -71,7 +71,7 @@ public void testGetDatefeedState() { MlTasks.datafeedTaskId("foo"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("foo", 0L), - new PersistentTasksCustomMetadata.Assignment("bar", "test assignment") + new PersistentTasksMetadataSection.Assignment("bar", "test assignment") ); // A task with no state means the datafeed is starting assertEquals(DatafeedState.STARTING, MlTasks.getDatafeedState("foo", tasksBuilder.build())); @@ -81,7 +81,7 @@ public void testGetDatefeedState() { } public void testGetSnapshotUpgradeState() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); // A missing task is a stopped snapshot upgrade assertEquals(SnapshotUpgradeState.STOPPED, MlTasks.getSnapshotUpgradeState("foo", "1", tasksBuilder.build())); @@ -89,7 +89,7 @@ public void testGetSnapshotUpgradeState() { MlTasks.snapshotUpgradeTaskId("foo", "1"), 
MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, new SnapshotUpgradeTaskParams("foo", "1"), - new PersistentTasksCustomMetadata.Assignment("bar", "test assignment") + new PersistentTasksMetadataSection.Assignment("bar", "test assignment") ); // A task with no state means the datafeed is starting assertEquals(SnapshotUpgradeState.LOADING_OLD_STATE, MlTasks.getSnapshotUpgradeState("foo", "1", tasksBuilder.build())); @@ -104,12 +104,12 @@ public void testGetSnapshotUpgradeState() { public void testGetJobTask() { assertNull(MlTasks.getJobTask("foo", null)); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); tasksBuilder.addTask( MlTasks.jobTaskId("foo"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo"), - new PersistentTasksCustomMetadata.Assignment("bar", "test assignment") + new PersistentTasksMetadataSection.Assignment("bar", "test assignment") ); assertNotNull(MlTasks.getJobTask("foo", tasksBuilder.build())); @@ -119,12 +119,12 @@ public void testGetJobTask() { public void testGetDatafeedTask() { assertNull(MlTasks.getDatafeedTask("foo", null)); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); tasksBuilder.addTask( MlTasks.datafeedTaskId("foo"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("foo", 0L), - new PersistentTasksCustomMetadata.Assignment("bar", "test assignment") + new PersistentTasksMetadataSection.Assignment("bar", "test assignment") ); assertNotNull(MlTasks.getDatafeedTask("foo", tasksBuilder.build())); @@ -132,26 +132,26 @@ public void testGetDatafeedTask() { } public void testOpenJobIds() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); assertThat(MlTasks.openJobIds(tasksBuilder.build()), empty()); tasksBuilder.addTask( MlTasks.jobTaskId("foo-1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-1"), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.addTask( MlTasks.jobTaskId("bar"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("bar"), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.addTask( MlTasks.datafeedTaskId("df"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("df", 0L), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); assertThat(MlTasks.openJobIds(tasksBuilder.build()), containsInAnyOrder("foo-1", "bar")); @@ -162,26 +162,26 @@ public void testOpenJobIds_GivenNull() { } public void testStartedDatafeedIds() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); assertThat(MlTasks.openJobIds(tasksBuilder.build()), empty()); tasksBuilder.addTask( MlTasks.jobTaskId("job-1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-1"), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + 
new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.addTask( MlTasks.datafeedTaskId("df1"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("df1", 0L), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.addTask( MlTasks.datafeedTaskId("df2"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("df2", 0L), - new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-2", "test assignment") ); assertThat(MlTasks.startedDatafeedIds(tasksBuilder.build()), containsInAnyOrder("df1", "df2")); @@ -192,24 +192,24 @@ public void testStartedDatafeedIds_GivenNull() { } public void testUnallocatedJobIds() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); tasksBuilder.addTask( MlTasks.jobTaskId("job_with_assignment"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("job_with_assignment"), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.addTask( MlTasks.jobTaskId("job_without_assignment"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("job_without_assignment"), - new PersistentTasksCustomMetadata.Assignment(null, "test assignment") + new PersistentTasksMetadataSection.Assignment(null, "test assignment") ); tasksBuilder.addTask( MlTasks.jobTaskId("job_without_node"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("job_without_node"), - new PersistentTasksCustomMetadata.Assignment("dead-node", "expired node") + new PersistentTasksMetadataSection.Assignment("dead-node", "expired node") ); DiscoveryNodes nodes = DiscoveryNodes.builder() @@ -222,24 +222,24 @@ public void testUnallocatedJobIds() { } public void testUnallocatedDatafeedIds() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); tasksBuilder.addTask( MlTasks.datafeedTaskId("datafeed_with_assignment"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("datafeed_with_assignment", 0L), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.addTask( MlTasks.datafeedTaskId("datafeed_without_assignment"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("datafeed_without_assignment", 0L), - new PersistentTasksCustomMetadata.Assignment(null, "test assignment") + new PersistentTasksMetadataSection.Assignment(null, "test assignment") ); tasksBuilder.addTask( MlTasks.datafeedTaskId("datafeed_without_node"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("datafeed_without_node", 0L), - new PersistentTasksCustomMetadata.Assignment("dead_node", "expired node") + new PersistentTasksMetadataSection.Assignment("dead_node", "expired node") ); DiscoveryNodes nodes = DiscoveryNodes.builder() @@ -255,64 +255,64 @@ public void testUnallocatedDatafeedIds() { } public void testDatafeedTasksOnNode() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder 
tasksBuilder = PersistentTasksMetadataSection.builder(); assertThat(MlTasks.openJobIds(tasksBuilder.build()), empty()); tasksBuilder.addTask( MlTasks.datafeedTaskId("df1"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("df1", 0L), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.addTask( MlTasks.jobTaskId("job-2"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-2"), - new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-2", "test assignment") ); tasksBuilder.addTask( MlTasks.datafeedTaskId("df2"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("df2", 0L), - new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-2", "test assignment") ); assertThat(MlTasks.datafeedTasksOnNode(tasksBuilder.build(), "node-2"), contains(hasProperty("id", equalTo("datafeed-df2")))); } public void testJobTasksOnNode() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); assertThat(MlTasks.openJobIds(tasksBuilder.build()), empty()); tasksBuilder.addTask( MlTasks.jobTaskId("job-1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-1"), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.addTask( MlTasks.datafeedTaskId("df1"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("df1", 0L), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.addTask( MlTasks.jobTaskId("job-2"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-2"), - new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-2", "test assignment") ); tasksBuilder.addTask( MlTasks.datafeedTaskId("df2"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("df2", 0L), - new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-2", "test assignment") ); tasksBuilder.addTask( MlTasks.jobTaskId("job-3"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-3"), - new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-2", "test assignment") ); assertThat( @@ -322,21 +322,21 @@ public void testJobTasksOnNode() { } public void testNonFailedJobTasksOnNode() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); assertThat(MlTasks.openJobIds(tasksBuilder.build()), empty()); tasksBuilder.addTask( MlTasks.jobTaskId("job-1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-1"), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-1"), new JobTaskState(JobState.FAILED, 1, "testing", Instant.now())); tasksBuilder.addTask( 
MlTasks.jobTaskId("job-2"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-2"), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); if (randomBoolean()) { tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-2"), new JobTaskState(JobState.OPENED, 2, "testing", Instant.now())); @@ -345,7 +345,7 @@ public void testNonFailedJobTasksOnNode() { MlTasks.jobTaskId("job-3"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-3"), - new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-2", "test assignment") ); if (randomBoolean()) { tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-3"), new JobTaskState(JobState.FAILED, 3, "testing", Instant.now())); @@ -361,7 +361,7 @@ public void testGetDataFrameAnalyticsState_GivenNullTask() { public void testGetDataFrameAnalyticsState_GivenTaskWithNullState() { String jobId = "foo"; - PersistentTasksCustomMetadata.PersistentTask task = createDataFrameAnalyticsTask(jobId, "test_node", null, false); + PersistentTasksMetadataSection.PersistentTask task = createDataFrameAnalyticsTask(jobId, "test_node", null, false); DataFrameAnalyticsState state = MlTasks.getDataFrameAnalyticsState(task); @@ -370,7 +370,7 @@ public void testGetDataFrameAnalyticsState_GivenTaskWithNullState() { public void testGetDataFrameAnalyticsState_GivenTaskWithStartedState() { String jobId = "foo"; - PersistentTasksCustomMetadata.PersistentTask task = createDataFrameAnalyticsTask( + PersistentTasksMetadataSection.PersistentTask task = createDataFrameAnalyticsTask( jobId, "test_node", DataFrameAnalyticsState.STARTED, @@ -384,7 +384,7 @@ public void testGetDataFrameAnalyticsState_GivenTaskWithStartedState() { public void testGetDataFrameAnalyticsState_GivenStaleTaskWithStartedState() { String jobId = "foo"; - PersistentTasksCustomMetadata.PersistentTask task = createDataFrameAnalyticsTask( + PersistentTasksMetadataSection.PersistentTask task = createDataFrameAnalyticsTask( jobId, "test_node", DataFrameAnalyticsState.STARTED, @@ -398,7 +398,7 @@ public void testGetDataFrameAnalyticsState_GivenStaleTaskWithStartedState() { public void testGetDataFrameAnalyticsState_GivenTaskWithStoppingState() { String jobId = "foo"; - PersistentTasksCustomMetadata.PersistentTask task = createDataFrameAnalyticsTask( + PersistentTasksMetadataSection.PersistentTask task = createDataFrameAnalyticsTask( jobId, "test_node", DataFrameAnalyticsState.STOPPING, @@ -412,7 +412,7 @@ public void testGetDataFrameAnalyticsState_GivenTaskWithStoppingState() { public void testGetDataFrameAnalyticsState_GivenStaleTaskWithStoppingState() { String jobId = "foo"; - PersistentTasksCustomMetadata.PersistentTask task = createDataFrameAnalyticsTask( + PersistentTasksMetadataSection.PersistentTask task = createDataFrameAnalyticsTask( jobId, "test_node", DataFrameAnalyticsState.STOPPING, @@ -426,7 +426,7 @@ public void testGetDataFrameAnalyticsState_GivenStaleTaskWithStoppingState() { public void testGetDataFrameAnalyticsState_GivenTaskWithFailedState() { String jobId = "foo"; - PersistentTasksCustomMetadata.PersistentTask task = createDataFrameAnalyticsTask( + PersistentTasksMetadataSection.PersistentTask task = createDataFrameAnalyticsTask( jobId, "test_node", DataFrameAnalyticsState.FAILED, @@ -440,7 +440,7 @@ public void testGetDataFrameAnalyticsState_GivenTaskWithFailedState() { public void 
testGetDataFrameAnalyticsState_GivenStaleTaskWithFailedState() { String jobId = "foo"; - PersistentTasksCustomMetadata.PersistentTask task = createDataFrameAnalyticsTask( + PersistentTasksMetadataSection.PersistentTask task = createDataFrameAnalyticsTask( jobId, "test_node", DataFrameAnalyticsState.FAILED, @@ -464,37 +464,37 @@ public void testPrettyPrintTaskName_GivenUnknownTaskName() { } public void testFindMlProcessTasks() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); tasksBuilder.addTask( MlTasks.jobTaskId("ad-1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("ad-1"), - new PersistentTasksCustomMetadata.Assignment(randomAlphaOfLength(5), "test") + new PersistentTasksMetadataSection.Assignment(randomAlphaOfLength(5), "test") ); tasksBuilder.addTask( MlTasks.dataFrameAnalyticsTaskId("dfa-1"), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new StartDataFrameAnalyticsAction.TaskParams("dfa-1", MlConfigVersion.CURRENT, true), - new PersistentTasksCustomMetadata.Assignment(randomAlphaOfLength(5), "test assignment") + new PersistentTasksMetadataSection.Assignment(randomAlphaOfLength(5), "test assignment") ); tasksBuilder.addTask( MlTasks.snapshotUpgradeTaskId("snapshot-upgrade-1", "some-snapshot-id"), MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, new SnapshotUpgradeTaskParams("snapshot-upgrade-1", "some-snapshot-id"), - new PersistentTasksCustomMetadata.Assignment(randomAlphaOfLength(5), "test assignment") + new PersistentTasksMetadataSection.Assignment(randomAlphaOfLength(5), "test assignment") ); tasksBuilder.addTask( MlTasks.datafeedTaskId("datafeed-1"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("datafeed-1", "now"), - new PersistentTasksCustomMetadata.Assignment(randomAlphaOfLength(5), "test assignment") + new PersistentTasksMetadataSection.Assignment(randomAlphaOfLength(5), "test assignment") ); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); - Set> mlProcessTasks = MlTasks.findMlProcessTasks(tasks); + Set> mlProcessTasks = MlTasks.findMlProcessTasks(tasks); assertThat(mlProcessTasks, hasSize(3)); Set taskNames = mlProcessTasks.stream() - .map(PersistentTasksCustomMetadata.PersistentTask::getTaskName) + .map(PersistentTasksMetadataSection.PersistentTask::getTaskName) .collect(Collectors.toSet()); assertThat( taskNames, @@ -502,18 +502,18 @@ public void testFindMlProcessTasks() { ); } - private static PersistentTasksCustomMetadata.PersistentTask createDataFrameAnalyticsTask( + private static PersistentTasksMetadataSection.PersistentTask createDataFrameAnalyticsTask( String jobId, String nodeId, DataFrameAnalyticsState state, boolean isStale ) { - PersistentTasksCustomMetadata.Builder builder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder builder = PersistentTasksMetadataSection.builder(); builder.addTask( MlTasks.dataFrameAnalyticsTaskId(jobId), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new StartDataFrameAnalyticsAction.TaskParams(jobId, MlConfigVersion.CURRENT, false), - new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment") + new PersistentTasksMetadataSection.Assignment(nodeId, "test assignment") ); if (state != null) { builder.updateTaskState( @@ -521,7 +521,7 @@ private static PersistentTasksCustomMetadata.PersistentTask createDataFrameAn new DataFrameAnalyticsTaskState(state, 
builder.getLastAllocationId() - (isStale ? 1 : 0), null, Instant.now()) ); } - PersistentTasksCustomMetadata tasks = builder.build(); + PersistentTasksMetadataSection tasks = builder.build(); return tasks.getTask(MlTasks.dataFrameAnalyticsTaskId(jobId)); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractorTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractorTests.java index a5d823fc2144f..4b06b434b5772 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractorTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ml/utils/InferenceProcessorInfoExtractorTests.java @@ -70,7 +70,7 @@ public void testGetModelIdsFromInferenceProcessors() throws IOException { Set expectedModelIds = new HashSet<>(Arrays.asList(modelId1, modelId2, modelId3)); ClusterState clusterState = buildClusterStateWithModelReferences(2, modelId1, modelId2, modelId3); - IngestMetadata ingestMetadata = clusterState.metadata().custom(IngestMetadata.TYPE); + IngestMetadata ingestMetadata = clusterState.metadata().section(IngestMetadata.TYPE); Set actualModelIds = InferenceProcessorInfoExtractor.getModelIdsFromInferenceProcessors(ingestMetadata); assertThat(actualModelIds, equalTo(expectedModelIds)); @@ -81,7 +81,7 @@ public void testGetModelIdsFromInferenceProcessorsWhenNull() throws IOException Set expectedModelIds = new HashSet<>(Arrays.asList()); ClusterState clusterState = buildClusterStateWithModelReferences(0); - IngestMetadata ingestMetadata = clusterState.metadata().custom(IngestMetadata.TYPE); + IngestMetadata ingestMetadata = clusterState.metadata().section(IngestMetadata.TYPE); Set actualModelIds = InferenceProcessorInfoExtractor.getModelIdsFromInferenceProcessors(ingestMetadata); assertThat(actualModelIds, equalTo(expectedModelIds)); @@ -126,7 +126,7 @@ public void testNumInferenceProcessorsRecursivelyDefined() throws IOException { IngestMetadata ingestMetadata = new IngestMetadata(configurations); ClusterState cs = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, ingestMetadata)) .nodes( DiscoveryNodes.builder() .add(DiscoveryNodeUtils.create("min_node", new TransportAddress(InetAddress.getLoopbackAddress(), 9300))) @@ -176,7 +176,7 @@ private static ClusterState buildClusterStateWithModelReferences(int numPipeline IngestMetadata ingestMetadata = new IngestMetadata(configurations); return ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, ingestMetadata)) .nodes( DiscoveryNodes.builder() .add(DiscoveryNodeUtils.create("min_node", new TransportAddress(InetAddress.getLoopbackAddress(), 9300))) diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleNameTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleNameTests.java index 195e126662481..657f16d80c96f 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleNameTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/security/authc/support/mapper/TemplateRoleNameTests.java @@ -296,7 
+296,7 @@ public void testValidateWillFailWhenStoredScriptIsNotEnabled() { final ScriptMetadata scriptMetadata = new ScriptMetadata.Builder(null).storeScript("foo", storedScriptSource).build(); when(clusterChangedEvent.state()).thenReturn(clusterState); when(clusterState.metadata()).thenReturn(metadata); - when(metadata.custom(ScriptMetadata.TYPE)).thenReturn(scriptMetadata); + when(metadata.section(ScriptMetadata.TYPE)).thenReturn(scriptMetadata); when(storedScriptSource.getLang()).thenReturn("mustache"); when(storedScriptSource.getSource()).thenReturn(""); when(storedScriptSource.getOptions()).thenReturn(Collections.emptyMap()); @@ -324,7 +324,7 @@ public void testValidateWillFailWhenStoredScriptIsNotFound() { final ScriptMetadata scriptMetadata = new ScriptMetadata.Builder(null).build(); when(clusterChangedEvent.state()).thenReturn(clusterState); when(clusterState.metadata()).thenReturn(metadata); - when(metadata.custom(ScriptMetadata.TYPE)).thenReturn(scriptMetadata); + when(metadata.section(ScriptMetadata.TYPE)).thenReturn(scriptMetadata); scriptService.applyClusterState(clusterChangedEvent); final BytesReference storedScript = new BytesArray(""" diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java index b0127c0005323..623c403fd4de5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/template/IndexTemplateRegistryTests.java @@ -812,8 +812,8 @@ private ClusterState createClusterState( Metadata.builder() .componentTemplates(componentTemplates) .transientSettings(Settings.EMPTY) - .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) - .putCustom(IngestMetadata.TYPE, ingestMetadata) + .putSection(IndexLifecycleMetadata.TYPE, ilmMeta) + .putSection(IngestMetadata.TYPE, ingestMetadata) .build() ) .blocks(new ClusterBlocks.Builder().build()) diff --git a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingComponent.java b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingComponent.java index 29041b0c58434..79070061aedf9 100644 --- a/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingComponent.java +++ b/x-pack/plugin/deprecation/src/main/java/org/elasticsearch/xpack/deprecation/logging/DeprecationIndexingComponent.java @@ -135,7 +135,7 @@ public void clusterChanged(ClusterChangedEvent event) { if (event.metadataChanged() == false) { return; } - final IndexLifecycleMetadata indexLifecycleMetadata = event.state().metadata().custom(IndexLifecycleMetadata.TYPE); + final IndexLifecycleMetadata indexLifecycleMetadata = event.state().metadata().section(IndexLifecycleMetadata.TYPE); if (event.state().getMetadata().templatesV2().containsKey(".deprecation-indexing-template") && indexLifecycleMetadata != null diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java index e5b9ca32808a7..150d23849a2d8 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java +++ 
b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/DownsampleShardPersistentTaskExecutor.java @@ -36,8 +36,8 @@ import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.Task; @@ -94,7 +94,7 @@ protected AllocatedPersistentTask createTask( final String type, final String action, final TaskId parentTaskId, - final PersistentTasksCustomMetadata.PersistentTask taskInProgress, + final PersistentTasksMetadataSection.PersistentTask taskInProgress, final Map headers ) { final DownsampleShardTaskParams params = taskInProgress.getParams(); @@ -123,7 +123,7 @@ public void validate(DownsampleShardTaskParams params, ClusterState clusterState } @Override - public PersistentTasksCustomMetadata.Assignment getAssignment( + public PersistentTasksMetadataSection.Assignment getAssignment( final DownsampleShardTaskParams params, final Collection candidateNodes, final ClusterState clusterState @@ -139,7 +139,7 @@ public PersistentTasksCustomMetadata.Assignment getAssignment( var indexShardRouting = findShardRoutingTable(shardId, clusterState); if (indexShardRouting == null) { var node = selectLeastLoadedNode(clusterState, candidateNodes, DiscoveryNode::canContainData); - return new PersistentTasksCustomMetadata.Assignment(node.getId(), "a node to fail and stop this persistent task"); + return new PersistentTasksMetadataSection.Assignment(node.getId(), "a node to fail and stop this persistent task"); } final ShardRouting shardRouting = indexShardRouting.primaryShard(); @@ -151,7 +151,7 @@ public PersistentTasksCustomMetadata.Assignment getAssignment( .filter(candidateNode -> candidateNode.getId().equals(shardRouting.currentNodeId())) .findAny() .map( - node -> new PersistentTasksCustomMetadata.Assignment( + node -> new PersistentTasksMetadataSection.Assignment( node.getId(), "downsampling using node holding shard [" + shardId + "]" ) diff --git a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java index d8c9acff156ce..5ca49eac6b4c8 100644 --- a/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java +++ b/x-pack/plugin/downsample/src/main/java/org/elasticsearch/xpack/downsample/TransportDownsampleAction.java @@ -65,7 +65,7 @@ import org.elasticsearch.indices.IndicesService; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.persistent.PersistentTaskParams; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.tasks.Task; @@ -496,7 +496,7 @@ private void performShardDownsampling( dimensionFields, shardId ); - Predicate> predicate = runningTask -> { + Predicate> predicate = runningTask -> { if (runningTask == null) { // NOTE: don't need to wait if the persistent task completed and was removed 
return true; @@ -507,7 +507,7 @@ private void performShardDownsampling( var taskListener = new PersistentTasksService.WaitForPersistentTaskListener<>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask persistentTask) { if (persistentTask != null) { var runningPersistentTaskState = (DownsampleShardPersistentTaskState) persistentTask.getState(); if (runningPersistentTaskState != null) { diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java index 868ec49ff1d97..d7409fc717052 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPlugin.java @@ -10,7 +10,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; @@ -214,11 +214,11 @@ public Collection createComponents(PluginServices services) { @Override public List getNamedWriteables() { return List.of( - new NamedWriteableRegistry.Entry(Metadata.Custom.class, EnrichMetadata.TYPE, EnrichMetadata::new), + new NamedWriteableRegistry.Entry(MetadataSection.class, EnrichMetadata.TYPE, EnrichMetadata::new), new NamedWriteableRegistry.Entry( NamedDiff.class, EnrichMetadata.TYPE, - in -> EnrichMetadata.readDiffFrom(Metadata.Custom.class, EnrichMetadata.TYPE, in) + in -> EnrichMetadata.readDiffFrom(MetadataSection.class, EnrichMetadata.TYPE, in) ) ); } @@ -226,7 +226,7 @@ public List getNamedWriteables() { @Override public List getNamedXContent() { return List.of( - new NamedXContentRegistry.Entry(Metadata.Custom.class, new ParseField(EnrichMetadata.TYPE), EnrichMetadata::fromXContent) + new NamedXContentRegistry.Entry(MetadataSection.class, new ParseField(EnrichMetadata.TYPE), EnrichMetadata::fromXContent) ); } diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyReindexPipeline.java b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyReindexPipeline.java index 5b7020c3f2bb0..4f2074c98805a 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyReindexPipeline.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichPolicyReindexPipeline.java @@ -48,7 +48,7 @@ static String pipelineName() { * @return true if a pipeline exists that is compatible with this version of Enrich, false otherwise */ static boolean exists(ClusterState clusterState) { - final IngestMetadata ingestMetadata = clusterState.getMetadata().custom(IngestMetadata.TYPE); + final IngestMetadata ingestMetadata = clusterState.getMetadata().section(IngestMetadata.TYPE); // we ensure that we both have the pipeline and its version represents the current (or later) version if (ingestMetadata != null) { final PipelineConfiguration pipeline = ingestMetadata.getPipelines().get(pipelineName()); diff --git a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java 
b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java index 82f9877826a5c..dcf7eeac76b07 100644 --- a/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java +++ b/x-pack/plugin/enrich/src/main/java/org/elasticsearch/xpack/enrich/EnrichStore.java @@ -157,7 +157,7 @@ public static EnrichPolicy getPolicy(String name, ClusterState state) { */ public static Map getPolicies(ClusterState state) { final Map policies; - final EnrichMetadata enrichMetadata = state.metadata().custom(EnrichMetadata.TYPE); + final EnrichMetadata enrichMetadata = state.metadata().section(EnrichMetadata.TYPE); if (enrichMetadata != null) { // Make a copy, because policies map inside custom metadata is read only: policies = new HashMap<>(enrichMetadata.getPolicies()); @@ -178,7 +178,7 @@ private static void updateClusterState( public ClusterState execute(ClusterState currentState) throws Exception { Map policies = function.apply(currentState); Metadata metadata = Metadata.builder(currentState.metadata()) - .putCustom(EnrichMetadata.TYPE, new EnrichMetadata(policies)) + .putSection(EnrichMetadata.TYPE, new EnrichMetadata(policies)) .build(); return ClusterState.builder(currentState).metadata(metadata).build(); } diff --git a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java index 06f9eb21fe2dc..0655cec4bdeed 100644 --- a/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java +++ b/x-pack/plugin/enrich/src/test/java/org/elasticsearch/xpack/enrich/EnrichPolicyExecutorTests.java @@ -386,7 +386,7 @@ protected void public void testRunPolicyLocallyMissingPolicy() { EnrichPolicy enrichPolicy = EnrichPolicyTests.randomEnrichPolicy(XContentType.JSON); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(EnrichMetadata.TYPE, new EnrichMetadata(Map.of("id", enrichPolicy))).build()) + .metadata(Metadata.builder().putSection(EnrichMetadata.TYPE, new EnrichMetadata(Map.of("id", enrichPolicy))).build()) .build(); ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn(clusterState); diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java index 50102b8cfcf53..4114a418aa45a 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/analytics/AnalyticsTemplateRegistryTests.java @@ -485,8 +485,8 @@ private ClusterState createClusterState( .indexTemplates(composableTemplates) .componentTemplates(componentTemplates) .transientSettings(Settings.EMPTY) - .putCustom(IngestMetadata.TYPE, ingestMetadata) - .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .putSection(IngestMetadata.TYPE, ingestMetadata) + .putSection(IndexLifecycleMetadata.TYPE, ilmMeta) .build() ) .blocks(new ClusterBlocks.Builder().build()) diff --git a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java 
b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java index 3fbc5cd749cb2..ecafcb0bcb5b5 100644 --- a/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java +++ b/x-pack/plugin/ent-search/src/test/java/org/elasticsearch/xpack/application/connector/ConnectorTemplateRegistryTests.java @@ -515,8 +515,8 @@ private ClusterState createClusterState( .indexTemplates(composableTemplates) .componentTemplates(componentTemplates) .transientSettings(Settings.EMPTY) - .putCustom(IngestMetadata.TYPE, ingestMetadata) - .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .putSection(IngestMetadata.TYPE, ingestMetadata) + .putSection(IndexLifecycleMetadata.TYPE, ilmMeta) .build() ) .blocks(new ClusterBlocks.Builder().build()) diff --git a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java index e976a8d9be48e..55ce7f28a2764 100644 --- a/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java +++ b/x-pack/plugin/esql/src/main/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolver.java @@ -381,7 +381,7 @@ public void messageReceived(LookupRequest request, TransportChannel channel, Tas } protected Map availablePolicies() { - final EnrichMetadata metadata = clusterService.state().metadata().custom(EnrichMetadata.TYPE); + final EnrichMetadata metadata = clusterService.state().metadata().section(EnrichMetadata.TYPE); return metadata == null ? Map.of() : metadata.getPolicies(); } diff --git a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java index 45623a39da936..c6fe91a4b25ab 100644 --- a/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java +++ b/x-pack/plugin/esql/src/test/java/org/elasticsearch/xpack/esql/enrich/EnrichPolicyResolverTests.java @@ -454,7 +454,7 @@ static ClusterService mockClusterService(Map policies) { ClusterService clusterService = mock(ClusterService.class); EnrichMetadata enrichMetadata = new EnrichMetadata(policies); ClusterState state = ClusterState.builder(new ClusterName("test")) - .metadata(Metadata.builder().customs(Map.of(EnrichMetadata.TYPE, enrichMetadata))) + .metadata(Metadata.builder().sections(Map.of(EnrichMetadata.TYPE, enrichMetadata))) .build(); when(clusterService.state()).thenReturn(state); return clusterService; diff --git a/x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListener.java b/x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListener.java index d6e6f57f10976..d662c79f7484d 100644 --- a/x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListener.java +++ b/x-pack/plugin/geoip-enterprise-downloader/src/main/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListener.java @@ -25,7 +25,7 @@ import org.elasticsearch.license.LicenseStateListener; import org.elasticsearch.license.LicensedFeature; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import 
org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.RemoteTransportException; @@ -85,7 +85,7 @@ public void licenseStateChanged() { @Override public void clusterChanged(ClusterChangedEvent event) { - hasIngestGeoIpMetadata = event.state().metadata().custom(INGEST_GEOIP_CUSTOM_METADATA_TYPE) != null; + hasIngestGeoIpMetadata = event.state().metadata().section(INGEST_GEOIP_CUSTOM_METADATA_TYPE) != null; final boolean ingestGeoIpCustomMetaChangedInEvent = event.metadataChanged() && event.changedCustomMetadataSet().contains(INGEST_GEOIP_CUSTOM_METADATA_TYPE); final boolean masterNodeChanged = Objects.equals( @@ -131,7 +131,7 @@ private void ensureTaskStarted() { } private void ensureTaskStopped() { - ActionListener> listener = ActionListener.wrap( + ActionListener> listener = ActionListener.wrap( r -> logger.debug("Stopped enterprise geoip downloader task"), e -> { Throwable t = e instanceof RemoteTransportException ? ExceptionsHelper.unwrapCause(e) : e; diff --git a/x-pack/plugin/geoip-enterprise-downloader/src/test/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListenerTests.java b/x-pack/plugin/geoip-enterprise-downloader/src/test/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListenerTests.java index 5a5aacd392f3c..8f70e1df06283 100644 --- a/x-pack/plugin/geoip-enterprise-downloader/src/test/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListenerTests.java +++ b/x-pack/plugin/geoip-enterprise-downloader/src/test/java/org/elasticsearch/xpack/geoip/EnterpriseGeoIpDownloaderLicenseListenerTests.java @@ -27,7 +27,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.license.internal.XPackLicenseStatus; import org.elasticsearch.node.Node; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.RemovePersistentTaskAction; import org.elasticsearch.persistent.StartPersistentTaskAction; import org.elasticsearch.telemetry.metric.MeterRegistry; @@ -166,8 +166,10 @@ private ClusterState createClusterState(boolean isMasterNode, boolean hasGeoIpDa } ClusterState.Builder clusterStateBuilder = ClusterState.builder(new ClusterName("name")); if (hasGeoIpDatabases) { - PersistentTasksCustomMetadata tasksCustomMetadata = new PersistentTasksCustomMetadata(1L, Map.of()); - clusterStateBuilder.metadata(Metadata.builder().putCustom(INGEST_GEOIP_CUSTOM_METADATA_TYPE, tasksCustomMetadata).put(idxMeta)); + PersistentTasksMetadataSection tasksCustomMetadata = new PersistentTasksMetadataSection(1L, Map.of()); + clusterStateBuilder.metadata( + Metadata.builder().putSection(INGEST_GEOIP_CUSTOM_METADATA_TYPE, tasksCustomMetadata).put(idxMeta) + ); } return clusterStateBuilder.nodes(discoveryNodesBuilder).build(); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java index 283e48a328aa7..5a1fc0af94e47 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingService.java @@ -186,7 +186,7 @@ 
public static Tuple migrateToDataTiersRouting( boolean dryRun ) { if (dryRun == false) { - IndexLifecycleMetadata currentMetadata = currentState.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata currentMetadata = currentState.metadata().section(IndexLifecycleMetadata.TYPE); if (currentMetadata != null && currentILMMode(currentState) != STOPPED) { throw new IllegalStateException( "stop ILM before migrating to data tiers, current state is [" + currentILMMode(currentState) + "]" @@ -250,7 +250,7 @@ static List migrateIlmPolicies( Client client, XPackLicenseState licenseState ) { - IndexLifecycleMetadata currentLifecycleMetadata = currentState.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata currentLifecycleMetadata = currentState.metadata().section(IndexLifecycleMetadata.TYPE); if (currentLifecycleMetadata == null) { return Collections.emptyList(); } @@ -280,7 +280,7 @@ static List migrateIlmPolicies( if (migratedPolicies.size() > 0) { IndexLifecycleMetadata newMetadata = new IndexLifecycleMetadata(newPolicies, currentILMMode(currentState)); - mb.putCustom(IndexLifecycleMetadata.TYPE, newMetadata); + mb.putSection(IndexLifecycleMetadata.TYPE, newMetadata); } return migratedPolicies; } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java index 42d1955f0d453..c86d108896d16 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorService.java @@ -212,7 +212,7 @@ public String name() { @Override public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { final var currentState = clusterService.state(); - var ilmMetadata = currentState.metadata().custom(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); + var ilmMetadata = currentState.metadata().section(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); final var currentMode = currentILMMode(currentState); if (ilmMetadata.getPolicyMetadatas().isEmpty()) { return createIndicator( diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java index f41524480e2df..52aa3c6733405 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.ActionResponse; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.settings.ClusterSettings; @@ -207,12 +207,12 @@ private static List xContentEntries() { return Arrays.asList( // Custom Metadata new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(IndexLifecycleMetadata.TYPE), parser -> IndexLifecycleMetadata.PARSER.parse(parser, null) ), new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(LifecycleOperationMetadata.TYPE), 
parser -> LifecycleOperationMetadata.PARSER.parse(parser, null) ), diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java index c2e2c80998992..f7502d4d5d0a9 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleService.java @@ -169,7 +169,7 @@ public ClusterState moveClusterStateToPreviouslyFailedStep(ClusterState currentS void onMaster(ClusterState clusterState) { maybeScheduleJob(); - final IndexLifecycleMetadata currentMetadata = clusterState.metadata().custom(IndexLifecycleMetadata.TYPE); + final IndexLifecycleMetadata currentMetadata = clusterState.metadata().section(IndexLifecycleMetadata.TYPE); if (currentMetadata != null) { OperationMode currentMode = currentILMMode(clusterState); if (OperationMode.STOPPED.equals(currentMode)) { @@ -334,11 +334,11 @@ public void clusterChanged(ClusterChangedEvent event) { public void applyClusterState(ClusterChangedEvent event) { if (event.localNodeMaster()) { // only act if we are master, otherwise // keep idle until elected - final IndexLifecycleMetadata ilmMetadata = event.state().metadata().custom(IndexLifecycleMetadata.TYPE); + final IndexLifecycleMetadata ilmMetadata = event.state().metadata().section(IndexLifecycleMetadata.TYPE); // only update the policy registry if we just became the master node or if the ilm metadata changed if (ilmMetadata != null && (event.previousState().nodes().isLocalNodeElectedMaster() == false - || ilmMetadata != event.previousState().metadata().custom(IndexLifecycleMetadata.TYPE))) { + || ilmMetadata != event.previousState().metadata().section(IndexLifecycleMetadata.TYPE))) { policyRegistry.update(ilmMetadata); } } @@ -373,7 +373,7 @@ public boolean policyExists(String policyId) { * @param fromClusterStateChange whether things are triggered from the cluster-state-listener or the scheduler */ void triggerPolicies(ClusterState clusterState, boolean fromClusterStateChange) { - IndexLifecycleMetadata currentMetadata = clusterState.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata currentMetadata = clusterState.metadata().section(IndexLifecycleMetadata.TYPE); OperationMode currentMode = currentILMMode(clusterState); if (currentMetadata == null) { diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransition.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransition.java index a87f2d4d2151e..e88c2191e7195 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransition.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransition.java @@ -129,7 +129,7 @@ static ClusterState moveClusterStateToStep( String policyName = idxMeta.getLifecyclePolicyName(); logger.info("moving index [{}] from [{}] to [{}] in policy [{}]", index.getName(), currentStepKey, newStepKey, policyName); - IndexLifecycleMetadata ilmMeta = state.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata ilmMeta = state.metadata().section(IndexLifecycleMetadata.TYPE); LifecyclePolicyMetadata policyMetadata = ilmMeta.getPolicyMetadatas().get(idxMeta.getLifecyclePolicyName()); LifecycleExecutionState lifecycleState = idxMeta.getLifecycleExecutionState(); LifecycleExecutionState newLifecycleState = updateExecutionStateToStep( @@ 
-155,7 +155,7 @@ static ClusterState moveClusterStateToErrorStep( BiFunction stepLookupFunction ) { IndexMetadata idxMeta = clusterState.getMetadata().index(index); - IndexLifecycleMetadata ilmMeta = clusterState.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata ilmMeta = clusterState.metadata().section(IndexLifecycleMetadata.TYPE); LifecyclePolicyMetadata policyMetadata = ilmMeta.getPolicyMetadatas().get(idxMeta.getLifecyclePolicyName()); LifecycleExecutionState currentState = idxMeta.getLifecycleExecutionState(); Step.StepKey currentStep; @@ -226,7 +226,7 @@ static ClusterState moveClusterStateToPreviouslyFailedStep( if (currentStepKey != null && ErrorStep.NAME.equals(currentStepKey.name()) && Strings.isNullOrEmpty(failedStep) == false) { Step.StepKey nextStepKey = new Step.StepKey(currentStepKey.phase(), currentStepKey.action(), failedStep); validateTransition(indexMetadata, currentStepKey, nextStepKey, stepRegistry); - IndexLifecycleMetadata ilmMeta = currentState.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata ilmMeta = currentState.metadata().section(IndexLifecycleMetadata.TYPE); LifecyclePolicyMetadata policyMetadata = ilmMeta.getPolicyMetadatas().get(indexMetadata.getLifecyclePolicyName()); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleUsageTransportAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleUsageTransportAction.java index bcfb39eb98378..3cefce46aff8d 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleUsageTransportAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycleUsageTransportAction.java @@ -69,7 +69,7 @@ protected void masterOperation( ActionListener listener ) { Metadata metadata = state.metadata(); - IndexLifecycleMetadata lifecycleMetadata = metadata.custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata lifecycleMetadata = metadata.section(IndexLifecycleMetadata.TYPE); final IndexLifecycleFeatureSetUsage usage; if (lifecycleMetadata != null) { Map policyUsage = new HashMap<>(); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportDeleteLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportDeleteLifecycleAction.java index a532392f35080..f555b848c81f7 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportDeleteLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportDeleteLifecycleAction.java @@ -91,14 +91,14 @@ public ClusterState execute(ClusterState currentState) { ); } ClusterState.Builder newState = ClusterState.builder(currentState); - IndexLifecycleMetadata currentMetadata = currentState.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata currentMetadata = currentState.metadata().section(IndexLifecycleMetadata.TYPE); if (currentMetadata == null || currentMetadata.getPolicyMetadatas().containsKey(request.getPolicyName()) == false) { throw new ResourceNotFoundException("Lifecycle policy not found: {}", request.getPolicyName()); } SortedMap newPolicies = new TreeMap<>(currentMetadata.getPolicyMetadatas()); newPolicies.remove(request.getPolicyName()); IndexLifecycleMetadata newMetadata = new IndexLifecycleMetadata(newPolicies, currentILMMode(currentState)); - newState.metadata(Metadata.builder(currentState.getMetadata()).putCustom(IndexLifecycleMetadata.TYPE, 
newMetadata).build()); + newState.metadata(Metadata.builder(currentState.getMetadata()).putSection(IndexLifecycleMetadata.TYPE, newMetadata).build()); return newState.build(); } } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java index f4598727d6123..9a8d21a569210 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportGetLifecycleAction.java @@ -68,7 +68,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A return; } - IndexLifecycleMetadata metadata = clusterService.state().metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata metadata = clusterService.state().metadata().section(IndexLifecycleMetadata.TYPE); if (metadata == null) { if (request.getPolicyNames().length == 0) { listener.onResponse(new Response(Collections.emptyList())); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java index 48cf84ed7a6a4..f95ba046e2c8c 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportMigrateToDataTiersAction.java @@ -112,7 +112,7 @@ protected void masterOperation( return; } - IndexLifecycleMetadata currentMetadata = state.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata currentMetadata = state.metadata().section(IndexLifecycleMetadata.TYPE); if (currentMetadata != null && currentILMMode(state) != STOPPED) { listener.onFailure( new IllegalStateException("stop ILM before migrating to data tiers, current state is [" + currentILMMode(state) + "]") diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java index 92d182ea6d44a..8bcd4bfe5ab5b 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportPutLifecycleAction.java @@ -111,7 +111,7 @@ protected void masterOperation( LifecyclePolicy.validatePolicyName(request.getPolicy().getName()); { - IndexLifecycleMetadata lifecycleMetadata = state.metadata().custom(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); + IndexLifecycleMetadata lifecycleMetadata = state.metadata().section(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); LifecyclePolicyMetadata existingPolicy = lifecycleMetadata.getPolicyMetadatas().get(request.getPolicy().getName()); // Make the request a no-op if the policy and filtered headers match exactly if (isNoopUpdate(existingPolicy, request.getPolicy(), filteredHeaders)) { @@ -175,7 +175,7 @@ public UpdateLifecyclePolicyTask( @Override public ClusterState execute(ClusterState currentState) throws Exception { final IndexLifecycleMetadata currentMetadata = currentState.metadata() - .custom(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); + .section(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); final LifecyclePolicyMetadata 
existingPolicyMetadata = currentMetadata.getPolicyMetadatas().get(request.getPolicy().getName()); // Double-check for no-op in the state update task, in case it was changed/reset in the meantime @@ -203,7 +203,9 @@ public ClusterState execute(ClusterState currentState) throws Exception { } } IndexLifecycleMetadata newMetadata = new IndexLifecycleMetadata(newPolicies, currentILMMode(currentState)); - stateBuilder.metadata(Metadata.builder(currentState.getMetadata()).putCustom(IndexLifecycleMetadata.TYPE, newMetadata).build()); + stateBuilder.metadata( + Metadata.builder(currentState.getMetadata()).putSection(IndexLifecycleMetadata.TYPE, newMetadata).build() + ); ClusterState nonRefreshedState = stateBuilder.build(); if (oldPolicy == null) { return nonRefreshedState; @@ -299,7 +301,7 @@ private static void validatePrerequisites(LifecyclePolicy policy, ClusterState s WaitForSnapshotAction action = (WaitForSnapshotAction) phase.getActions().get(WaitForSnapshotAction.NAME); String slmPolicy = action.getPolicy(); if (state.metadata() - .custom(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY) + .section(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY) .getSnapshotConfigurations() .get(slmPolicy) == null) { throw new IllegalArgumentException( diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java index 51df651ea4a4c..8593b170454a9 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/cluster/metadata/MetadataMigrateToDataTiersRoutingServiceTests.java @@ -116,7 +116,7 @@ public void testMigrateIlmPolicyForIndexWithoutILMMetadata() { ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap(policyMetadata.getName(), policyMetadata), @@ -134,7 +134,7 @@ public void testMigrateIlmPolicyForIndexWithoutILMMetadata() { assertThat(migratedPolicies.get(0), is(lifecycleName)); ClusterState newState = ClusterState.builder(state).metadata(newMetadata).build(); - IndexLifecycleMetadata updatedLifecycleMetadata = newState.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata updatedLifecycleMetadata = newState.metadata().section(IndexLifecycleMetadata.TYPE); LifecyclePolicy lifecyclePolicy = updatedLifecycleMetadata.getPolicies().get(lifecycleName); Map warmActions = lifecyclePolicy.getPhases().get("warm").getActions(); assertThat( @@ -184,7 +184,7 @@ public void testMigrateIlmPolicyForPhaseWithDeactivatedMigrateAction() { ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap(policyMetadata.getName(), policyMetadata), @@ -202,7 +202,7 @@ public void testMigrateIlmPolicyForPhaseWithDeactivatedMigrateAction() { assertThat(migratedPolicies.get(0), is(lifecycleName)); ClusterState newState = ClusterState.builder(state).metadata(newMetadata).build(); - IndexLifecycleMetadata updatedLifecycleMetadata = newState.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata updatedLifecycleMetadata = 
newState.metadata().section(IndexLifecycleMetadata.TYPE); LifecyclePolicy lifecyclePolicy = updatedLifecycleMetadata.getPolicies().get(lifecycleName); Map warmActions = lifecyclePolicy.getPhases().get("warm").getActions(); assertThat( @@ -243,7 +243,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() { ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap(policyMetadata.getName(), policyMetadata), @@ -299,7 +299,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() { ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap( @@ -350,7 +350,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() { ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap(policyMetadata.getName(), policyMetadata), @@ -404,7 +404,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() { ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap(policyMetadata.getName(), policyMetadata), @@ -454,7 +454,7 @@ public void testMigrateIlmPolicyRefreshesCachedPhase() { ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Collections.singletonMap(policyMetadata.getName(), policyMetadata), @@ -1039,7 +1039,7 @@ public void testMigrateToDataTiersRouting() { ClusterState state = ClusterState.builder(ClusterName.DEFAULT) .metadata( Metadata.builder() - .putCustom( + .putSection( IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata( Map.of( @@ -1147,7 +1147,7 @@ public void testMigrateToDataTiersRoutingRequiresILMStopped() { { ClusterState ilmRunningState = ClusterState.builder(ClusterName.DEFAULT) .metadata( - Metadata.builder().putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING)) + Metadata.builder().putSection(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING)) ) .build(); IllegalStateException illegalStateException = expectThrows( @@ -1160,7 +1160,7 @@ public void testMigrateToDataTiersRoutingRequiresILMStopped() { { ClusterState ilmStoppingState = ClusterState.builder(ClusterName.DEFAULT) .metadata( - Metadata.builder().putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.STOPPING)) + Metadata.builder().putSection(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.STOPPING)) ) .build(); IllegalStateException illegalStateException = expectThrows( @@ -1173,7 +1173,7 @@ public void testMigrateToDataTiersRoutingRequiresILMStopped() { { ClusterState ilmStoppedState = ClusterState.builder(ClusterName.DEFAULT) .metadata( - Metadata.builder().putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.STOPPED)) + Metadata.builder().putSection(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.STOPPED)) ) .build(); Tuple migratedState = migrateToDataTiersRouting( @@ -1195,7 +1195,7 @@ public void 
testDryRunDoesntRequireILMStopped() { { ClusterState ilmRunningState = ClusterState.builder(ClusterName.DEFAULT) .metadata( - Metadata.builder().putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING)) + Metadata.builder().putSection(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.RUNNING)) ) .build(); migrateToDataTiersRouting(ilmRunningState, "data", "catch-all", REGISTRY, client, null, true); @@ -1205,7 +1205,7 @@ public void testDryRunDoesntRequireILMStopped() { { ClusterState ilmStoppingState = ClusterState.builder(ClusterName.DEFAULT) .metadata( - Metadata.builder().putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.STOPPING)) + Metadata.builder().putSection(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Map.of(), OperationMode.STOPPING)) ) .build(); migrateToDataTiersRouting(ilmStoppingState, "data", "catch-all", REGISTRY, client, null, true); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java index b3146e81d08fc..4183565e99489 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/ExecuteStepsUpdateTaskTests.java @@ -150,7 +150,7 @@ private void setupIndexPolicy(String policyName) { index = indexMetadata.getIndex(); Metadata metadata = Metadata.builder() .persistentSettings(settings(IndexVersion.current()).build()) - .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata) + .putSection(IndexLifecycleMetadata.TYPE, lifecycleMetadata) .put(IndexMetadata.builder(indexMetadata)) .build(); String nodeId = randomAlphaOfLength(10); @@ -162,7 +162,7 @@ private void setupIndexPolicy(String policyName) { .metadata(metadata) .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) .build(); - policyStepsRegistry.update(clusterState.metadata().custom(IndexLifecycleMetadata.TYPE)); + policyStepsRegistry.update(clusterState.metadata().section(IndexLifecycleMetadata.TYPE)); } public void testNeverExecuteNonClusterStateStep() throws Exception { @@ -225,7 +225,7 @@ public void testExecuteInvalidStartStep() throws Exception { ) .build(); - policyStepsRegistry.update(clusterState.metadata().custom(IndexLifecycleMetadata.TYPE)); + policyStepsRegistry.update(clusterState.metadata().section(IndexLifecycleMetadata.TYPE)); Step invalidStep = new MockClusterStateActionStep(firstStepKey, secondStepKey); long now = randomNonNegativeLong(); @@ -341,6 +341,6 @@ private void setStateToKey(StepKey stepKey) throws IOException { ) ) .build(); - policyStepsRegistry.update(clusterState.metadata().custom(IndexLifecycleMetadata.TYPE)); + policyStepsRegistry.update(clusterState.metadata().section(IndexLifecycleMetadata.TYPE)); } } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java index 9e2a67caac253..24917a222e66d 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IlmHealthIndicatorServiceTests.java @@ -295,7 +295,7 @@ public void testMappedFieldsForTelemetry() { private static ClusterState 
createClusterStateWith(IndexLifecycleMetadata metadata) { var builder = new ClusterState.Builder(new ClusterName("test-cluster")); if (metadata != null) { - builder.metadata(new Metadata.Builder().putCustom(IndexLifecycleMetadata.TYPE, metadata)); + builder.metadata(new Metadata.Builder().putSection(IndexLifecycleMetadata.TYPE, metadata)); } return builder.build(); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java index d81faf6a398d7..7e68b3d822656 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleInfoTransportActionTests.java @@ -114,7 +114,7 @@ private ClusterState buildClusterState(List lifecyclePolicies, .collect(Collectors.toMap(LifecyclePolicyMetadata::getName, Function.identity())); IndexLifecycleMetadata indexLifecycleMetadata = new IndexLifecycleMetadata(lifecyclePolicyMetadatasMap, OperationMode.RUNNING); - Metadata.Builder metadata = Metadata.builder().putCustom(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata); + Metadata.Builder metadata = Metadata.builder().putSection(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata); indexPolicies.forEach((indexName, policyName) -> { Settings indexSettings = indexSettings(IndexVersion.current(), 1, 0).put(LifecycleSettings.LIFECYCLE_NAME, policyName).build(); IndexMetadata.Builder indexMetadata = IndexMetadata.builder(indexName).settings(indexSettings); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java index e757488c2690e..2a323347b2df2 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java @@ -10,7 +10,7 @@ import org.elasticsearch.cluster.ClusterModule; import org.elasticsearch.cluster.Diff; import org.elasticsearch.cluster.metadata.Metadata; -import org.elasticsearch.cluster.metadata.Metadata.Custom; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; import org.elasticsearch.common.io.stream.Writeable.Reader; import org.elasticsearch.common.util.Maps; @@ -55,7 +55,7 @@ import static org.elasticsearch.xpack.ilm.LifecyclePolicyTestsUtils.newTestLifecyclePolicy; import static org.elasticsearch.xpack.ilm.LifecyclePolicyTestsUtils.randomTimeseriesLifecyclePolicy; -public class IndexLifecycleMetadataTests extends ChunkedToXContentDiffableSerializationTestCase { +public class IndexLifecycleMetadataTests extends ChunkedToXContentDiffableSerializationTestCase { @Override protected IndexLifecycleMetadata createTestInstance() { @@ -77,7 +77,7 @@ protected IndexLifecycleMetadata doParseInstance(XContentParser parser) { } @Override - protected Reader instanceReader() { + protected Reader instanceReader() { return IndexLifecycleMetadata::new; } @@ -144,7 +144,7 @@ protected NamedXContentRegistry xContentRegistry() { } @Override - protected Metadata.Custom mutateInstance(Custom instance) { + protected MetadataSection mutateInstance(MetadataSection instance) { IndexLifecycleMetadata metadata = (IndexLifecycleMetadata) instance; Map policies = 
metadata.getPolicyMetadatas(); policies = new TreeMap<>(policies); @@ -167,12 +167,12 @@ protected Metadata.Custom mutateInstance(Custom instance) { } @Override - protected Custom makeTestChanges(Custom testInstance) { + protected MetadataSection makeTestChanges(MetadataSection testInstance) { return mutateInstance(testInstance); } @Override - protected Reader> diffReader() { + protected Reader> diffReader() { return IndexLifecycleMetadataDiff::new; } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java index 8a4859fcd8b77..553cb68bc395c 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleRunnerTests.java @@ -290,7 +290,7 @@ public void testRunStateChangePolicyWithNoNextStep() throws Exception { DiscoveryNode node = clusterService.localNode(); IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); ClusterState state = ClusterState.builder(new ClusterName("cluster")) - .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .metadata(Metadata.builder().put(indexMetadata, true).putSection(IndexLifecycleMetadata.TYPE, ilm)) .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId())) .build(); ClusterServiceUtils.setState(clusterService, state); @@ -342,7 +342,7 @@ public void testRunStateChangePolicyWithNextStep() throws Exception { DiscoveryNode node = clusterService.localNode(); IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); ClusterState state = ClusterState.builder(new ClusterName("cluster")) - .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .metadata(Metadata.builder().put(indexMetadata, true).putSection(IndexLifecycleMetadata.TYPE, ilm)) .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId())) .build(); ClusterServiceUtils.setState(clusterService, state); @@ -429,7 +429,7 @@ public void doTestRunPolicyWithFailureToReadPolicy(boolean asyncAction, boolean DiscoveryNode node = clusterService.localNode(); IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); ClusterState state = ClusterState.builder(new ClusterName("cluster")) - .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .metadata(Metadata.builder().put(indexMetadata, true).putSection(IndexLifecycleMetadata.TYPE, ilm)) .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId())) .build(); ClusterServiceUtils.setState(clusterService, state); @@ -478,7 +478,7 @@ public void testRunAsyncActionDoesNotRun() { DiscoveryNode node = clusterService.localNode(); IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); ClusterState state = ClusterState.builder(new ClusterName("cluster")) - .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .metadata(Metadata.builder().put(indexMetadata, true).putSection(IndexLifecycleMetadata.TYPE, ilm)) .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId())) .build(); 
ClusterServiceUtils.setState(clusterService, state); @@ -528,7 +528,7 @@ public void testRunStateChangePolicyWithAsyncActionNextStep() throws Exception { DiscoveryNode node = clusterService.localNode(); IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); ClusterState state = ClusterState.builder(new ClusterName("cluster")) - .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .metadata(Metadata.builder().put(indexMetadata, true).putSection(IndexLifecycleMetadata.TYPE, ilm)) .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId())) .build(); logger.info("--> state: {}", state); @@ -605,7 +605,7 @@ public void testRunPeriodicStep() throws Exception { DiscoveryNode node = clusterService.localNode(); IndexLifecycleMetadata ilm = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); ClusterState state = ClusterState.builder(new ClusterName("cluster")) - .metadata(Metadata.builder().put(indexMetadata, true).putCustom(IndexLifecycleMetadata.TYPE, ilm)) + .metadata(Metadata.builder().put(indexMetadata, true).putSection(IndexLifecycleMetadata.TYPE, ilm)) .nodes(DiscoveryNodes.builder().add(node).masterNodeId(node.getId()).localNodeId(node.getId())) .build(); logger.info("--> state: {}", state); diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java index 209839c9d24df..f736c5f2c4f07 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleServiceTests.java @@ -170,7 +170,7 @@ public void testStoppedModeSkip() { .build(); Map indices = Map.of(index.getName(), indexMetadata); Metadata metadata = Metadata.builder() - .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPED)) + .putSection(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPED)) .indices(indices) .persistentSettings(settings(IndexVersion.current()).build()) .build(); @@ -212,7 +212,7 @@ public void testRequestedStopOnShrink() { .build(); Map indices = Map.of(index.getName(), indexMetadata); Metadata metadata = Metadata.builder() - .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) + .putSection(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) .indices(indices) .persistentSettings(settings(IndexVersion.current()).build()) .build(); @@ -271,7 +271,7 @@ private void verifyCanStopWithStep(String stoppableStep) { .build(); Map indices = Map.of(index.getName(), indexMetadata); Metadata metadata = Metadata.builder() - .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) + .putSection(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) .indices(indices) .persistentSettings(settings(IndexVersion.current()).build()) .build(); @@ -322,7 +322,7 @@ public void testRequestedStopOnSafeAction() { .build(); Map indices = Map.of(index.getName(), indexMetadata); Metadata metadata = Metadata.builder() - .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) + .putSection(IndexLifecycleMetadata.TYPE, new 
IndexLifecycleMetadata(policyMap, OperationMode.STOPPING)) .indices(indices) .persistentSettings(settings(IndexVersion.current()).build()) .build(); @@ -443,7 +443,7 @@ public void doTestExceptionStillProcessesOtherIndices(boolean useOnMaster) { Map indices = Map.of(index1.getName(), i1indexMetadata, index2.getName(), i2indexMetadata); Metadata metadata = Metadata.builder() - .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING)) + .putSection(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING)) .indices(indices) .persistentSettings(settings(IndexVersion.current()).build()) .build(); @@ -580,7 +580,7 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() { Map indices = Map.of("no_danger", nonDangerousIndex, "danger", dangerousIndex); Metadata metadata = Metadata.builder() - .putCustom(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING)) + .putSection(IndexLifecycleMetadata.TYPE, new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING)) .indices(indices) .persistentSettings(settings(IndexVersion.current()).build()) .build(); @@ -621,7 +621,7 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() { state = ClusterState.builder(state) .metadata( Metadata.builder(state.metadata()) - .putCustom( + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Collections.singletonMap( @@ -655,7 +655,7 @@ public void testIndicesOnShuttingDownNodesInDangerousStep() { state = ClusterState.builder(state) .metadata( Metadata.builder(state.metadata()) - .putCustom( + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Collections.singletonMap( diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java index 9449e0c0574dc..8daa1e91dbb87 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleTransitionTests.java @@ -1311,7 +1311,7 @@ private ClusterState buildClusterState( Metadata metadata = Metadata.builder() .put(indexMetadata, true) - .putCustom(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata) + .putSection(IndexLifecycleMetadata.TYPE, indexLifecycleMetadata) .build(); return ClusterState.builder(new ClusterName("my_cluster")).metadata(metadata).build(); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java index eee3fe3ce53c2..dd8c33e0697e2 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToErrorStepUpdateTaskTests.java @@ -62,7 +62,7 @@ public void setupClusterState() { Metadata metadata = Metadata.builder() .persistentSettings(settings(IndexVersion.current()).build()) .put(IndexMetadata.builder(indexMetadata)) - .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .putSection(IndexLifecycleMetadata.TYPE, ilmMeta) .build(); clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java 
b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java index f9a8d4a2ab486..ca49432adf2a4 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/MoveToNextStepUpdateTaskTests.java @@ -76,7 +76,7 @@ public void setupClusterState() { Metadata metadata = Metadata.builder() .persistentSettings(settings(IndexVersion.current()).build()) .put(IndexMetadata.builder(indexMetadata)) - .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .putSection(IndexLifecycleMetadata.TYPE, ilmMeta) .build(); clusterState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java index 36d537a57382c..ff02f452c2080 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/PolicyStepsRegistryTests.java @@ -202,7 +202,7 @@ public void testUpdateFromNothingToSomethingToNothing() throws Exception { lifecycleState.setPhase("new"); Metadata metadata = Metadata.builder() .persistentSettings(settings(IndexVersion.current()).build()) - .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata) + .putSection(IndexLifecycleMetadata.TYPE, lifecycleMetadata) .put( IndexMetadata.builder("test") .settings( @@ -233,7 +233,7 @@ public void testUpdateFromNothingToSomethingToNothing() throws Exception { PolicyStepsRegistry registry = new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, client, null); // add new policy - registry.update(currentState.metadata().custom(IndexLifecycleMetadata.TYPE)); + registry.update(currentState.metadata().section(IndexLifecycleMetadata.TYPE)); assertThat(registry.getFirstStep(newPolicy.getName()), equalTo(policySteps.get(0))); assertThat(registry.getLifecyclePolicyMap().size(), equalTo(1)); @@ -257,7 +257,7 @@ public void testUpdateFromNothingToSomethingToNothing() throws Exception { ) .nodes(DiscoveryNodes.builder().localNodeId(nodeId).masterNodeId(nodeId).add(masterNode).build()) .build(); - registry.update(currentState.metadata().custom(IndexLifecycleMetadata.TYPE)); + registry.update(currentState.metadata().section(IndexLifecycleMetadata.TYPE)); assertThat(registeredStepsForPolicy.get(step.getKey()), equalTo(step)); assertThat(registry.getStep(metadata.index(index), step.getKey()), equalTo(step)); } @@ -265,7 +265,7 @@ public void testUpdateFromNothingToSomethingToNothing() throws Exception { Map registryPolicyMap = registry.getLifecyclePolicyMap(); Map registryFirstStepMap = registry.getFirstStepMap(); Map> registryStepMap = registry.getStepMap(); - registry.update(currentState.metadata().custom(IndexLifecycleMetadata.TYPE)); + registry.update(currentState.metadata().section(IndexLifecycleMetadata.TYPE)); assertThat(registry.getLifecyclePolicyMap(), equalTo(registryPolicyMap)); assertThat(registry.getFirstStepMap(), equalTo(registryFirstStepMap)); assertThat(registry.getStepMap(), equalTo(registryStepMap)); @@ -273,9 +273,9 @@ public void testUpdateFromNothingToSomethingToNothing() throws Exception { // remove policy lifecycleMetadata = new IndexLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING); currentState = ClusterState.builder(currentState) - 
.metadata(Metadata.builder(metadata).putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)) + .metadata(Metadata.builder(metadata).putSection(IndexLifecycleMetadata.TYPE, lifecycleMetadata)) .build(); - registry.update(currentState.metadata().custom(IndexLifecycleMetadata.TYPE)); + registry.update(currentState.metadata().section(IndexLifecycleMetadata.TYPE)); assertTrue(registry.getLifecyclePolicyMap().isEmpty()); assertTrue(registry.getFirstStepMap().isEmpty()); assertTrue(registry.getStepMap().isEmpty()); @@ -298,7 +298,7 @@ public void testUpdateChangedPolicy() { IndexLifecycleMetadata lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING); Metadata metadata = Metadata.builder() .persistentSettings(settings(IndexVersion.current()).build()) - .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata) + .putSection(IndexLifecycleMetadata.TYPE, lifecycleMetadata) .build(); String nodeId = randomAlphaOfLength(10); DiscoveryNode masterNode = DiscoveryNodeUtils.builder(nodeId) @@ -311,7 +311,7 @@ public void testUpdateChangedPolicy() { .build(); PolicyStepsRegistry registry = new PolicyStepsRegistry(NamedXContentRegistry.EMPTY, client, null); // add new policy - registry.update(currentState.metadata().custom(IndexLifecycleMetadata.TYPE)); + registry.update(currentState.metadata().section(IndexLifecycleMetadata.TYPE)); // swap out policy newPolicy = LifecyclePolicyTests.randomTestLifecyclePolicy(policyName); @@ -323,9 +323,9 @@ public void testUpdateChangedPolicy() { OperationMode.RUNNING ); currentState = ClusterState.builder(currentState) - .metadata(Metadata.builder(metadata).putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata)) + .metadata(Metadata.builder(metadata).putSection(IndexLifecycleMetadata.TYPE, lifecycleMetadata)) .build(); - registry.update(currentState.metadata().custom(IndexLifecycleMetadata.TYPE)); + registry.update(currentState.metadata().section(IndexLifecycleMetadata.TYPE)); // TODO(talevy): assert changes... right now we do not support updates to policies. 
will require internal cleanup } @@ -366,7 +366,7 @@ public void testUpdatePolicyButNoPhaseChangeIndexStepsDontChange() throws Except lifecycleState.setPhaseDefinition(phaseJson); Metadata metadata = Metadata.builder() .persistentSettings(settings(IndexVersion.current()).build()) - .putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata) + .putSection(IndexLifecycleMetadata.TYPE, lifecycleMetadata) .put( IndexMetadata.builder("test") .settings( @@ -397,7 +397,7 @@ public void testUpdatePolicyButNoPhaseChangeIndexStepsDontChange() throws Except PolicyStepsRegistry registry = new PolicyStepsRegistry(REGISTRY, client, null); // add new policy - registry.update(currentState.metadata().custom(IndexLifecycleMetadata.TYPE)); + registry.update(currentState.metadata().section(IndexLifecycleMetadata.TYPE)); Map registeredStepsForPolicy = registry.getStepMap().get(newPolicy.getName()); Step shrinkStep = registeredStepsForPolicy.entrySet() @@ -416,7 +416,7 @@ public void testUpdatePolicyButNoPhaseChangeIndexStepsDontChange() throws Except new LifecyclePolicyMetadata(updatedPolicy, headers, randomNonNegativeLong(), randomNonNegativeLong()) ); lifecycleMetadata = new IndexLifecycleMetadata(policyMap, OperationMode.RUNNING); - metadata = Metadata.builder(metadata).putCustom(IndexLifecycleMetadata.TYPE, lifecycleMetadata).build(); + metadata = Metadata.builder(metadata).putSection(IndexLifecycleMetadata.TYPE, lifecycleMetadata).build(); try (XContentBuilder builder = JsonXContent.contentBuilder()) { builder.startObject(); ChunkedToXContent.wrapAsToXContent(metadata).toXContent(builder, ToXContent.EMPTY_PARAMS); @@ -426,7 +426,7 @@ public void testUpdatePolicyButNoPhaseChangeIndexStepsDontChange() throws Except currentState = ClusterState.builder(ClusterName.DEFAULT).metadata(metadata).build(); // Update the policies - registry.update(currentState.metadata().custom(IndexLifecycleMetadata.TYPE)); + registry.update(currentState.metadata().section(IndexLifecycleMetadata.TYPE)); registeredStepsForPolicy = registry.getStepMap().get(newPolicy.getName()); shrinkStep = registeredStepsForPolicy.entrySet() diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java index 3f3285c5c2bd7..1f20f0139e07a 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/action/ReservedLifecycleStateServiceTests.java @@ -203,7 +203,7 @@ public void testActionAddRemove() throws Exception { assertThat(updatedState.keys(), containsInAnyOrder("my_timeseries_lifecycle", "my_timeseries_lifecycle1")); IndexLifecycleMetadata ilmMetadata = updatedState.state() .metadata() - .custom(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); + .section(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); assertThat(ilmMetadata.getPolicyMetadatas().keySet(), containsInAnyOrder("my_timeseries_lifecycle", "my_timeseries_lifecycle1")); String onePolicyRemovedJSON = """ @@ -222,7 +222,7 @@ public void testActionAddRemove() throws Exception { prevState = updatedState; updatedState = processJSON(action, prevState, onePolicyRemovedJSON); assertThat(updatedState.keys(), containsInAnyOrder("my_timeseries_lifecycle")); - ilmMetadata = updatedState.state().metadata().custom(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); + ilmMetadata = 
updatedState.state().metadata().section(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); assertThat(ilmMetadata.getPolicyMetadatas().keySet(), containsInAnyOrder("my_timeseries_lifecycle")); String onePolicyRenamedJSON = """ @@ -241,7 +241,7 @@ public void testActionAddRemove() throws Exception { prevState = updatedState; updatedState = processJSON(action, prevState, onePolicyRenamedJSON); assertThat(updatedState.keys(), containsInAnyOrder("my_timeseries_lifecycle2")); - ilmMetadata = updatedState.state().metadata().custom(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); + ilmMetadata = updatedState.state().metadata().section(IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata.EMPTY); assertThat(ilmMetadata.getPolicyMetadatas().keySet(), containsInAnyOrder("my_timeseries_lifecycle2")); } diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java index 90f9c721d25a9..56daa77bea26d 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeDataFrameAnalyticsIntegTestCase.java @@ -17,7 +17,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchModule; import org.elasticsearch.tasks.TaskInfo; @@ -280,9 +280,9 @@ private void assertInferenceModelPersisted(String jobId, Matcher> analyticsTaskList() { + protected Collection> analyticsTaskList() { ClusterState masterClusterState = clusterAdmin().prepareState().all().get().getState(); - PersistentTasksCustomMetadata persistentTasks = masterClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = masterClusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); return persistentTasks != null ? 
persistentTasks.findTasks(MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, task -> true) : Collections.emptyList(); diff --git a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java index ca5ecd80a83bb..d0ede29f3d8bf 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/MlNativeIntegTestCase.java @@ -22,7 +22,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.ComposableIndexTemplate; -import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.metadata.Template; import org.elasticsearch.common.Strings; import org.elasticsearch.common.compress.CompressedXContent; @@ -337,7 +337,7 @@ protected void ensureClusterStateConsistency() throws IOException { entries.addAll(new SearchModule(Settings.EMPTY, Collections.emptyList()).getNamedWriteables()); entries.add( new NamedWriteableRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata::fromStream ) @@ -349,16 +349,16 @@ protected void ensureClusterStateConsistency() throws IOException { TrainedModelAssignmentMetadata::readDiffFrom ) ); - entries.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, ModelAliasMetadata.NAME, ModelAliasMetadata::new)); + entries.add(new NamedWriteableRegistry.Entry(MetadataSection.class, ModelAliasMetadata.NAME, ModelAliasMetadata::new)); entries.add(new NamedWriteableRegistry.Entry(NamedDiff.class, ModelAliasMetadata.NAME, ModelAliasMetadata::readDiffFrom)); entries.add( - new NamedWriteableRegistry.Entry(Metadata.Custom.class, TrainedModelCacheMetadata.NAME, TrainedModelCacheMetadata::new) + new NamedWriteableRegistry.Entry(MetadataSection.class, TrainedModelCacheMetadata.NAME, TrainedModelCacheMetadata::new) ); entries.add( new NamedWriteableRegistry.Entry(NamedDiff.class, TrainedModelCacheMetadata.NAME, TrainedModelCacheMetadata::readDiffFrom) ); - entries.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, "ml", MlMetadata::new)); - entries.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata::new)); + entries.add(new NamedWriteableRegistry.Entry(MetadataSection.class, "ml", MlMetadata::new)); + entries.add(new NamedWriteableRegistry.Entry(MetadataSection.class, IndexLifecycleMetadata.TYPE, IndexLifecycleMetadata::new)); entries.add( new NamedWriteableRegistry.Entry( LifecycleType.class, @@ -395,7 +395,7 @@ protected void ensureClusterStateConsistency() throws IOException { ) ); entries.add(new NamedWriteableRegistry.Entry(ClusterState.Custom.class, TokenMetadata.TYPE, TokenMetadata::new)); - entries.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, AutoscalingMetadata.NAME, AutoscalingMetadata::new)); + entries.add(new NamedWriteableRegistry.Entry(MetadataSection.class, AutoscalingMetadata.NAME, AutoscalingMetadata::new)); entries.add( new NamedWriteableRegistry.Entry( NamedDiff.class, diff --git 
a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java index 1233004552023..9d6936997e573 100644 --- a/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java +++ b/x-pack/plugin/ml/qa/native-multi-node-tests/src/javaRestTest/java/org/elasticsearch/xpack/ml/integration/SetUpgradeModeIT.java @@ -8,7 +8,7 @@ import org.elasticsearch.ElasticsearchStatusException; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.GetDatafeedsStatsAction; @@ -64,7 +64,7 @@ public void testEnableUpgradeMode() throws Exception { ClusterState masterClusterState = clusterAdmin().prepareState().all().get().getState(); - PersistentTasksCustomMetadata persistentTasks = masterClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = masterClusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertThat(persistentTasks.findTasks(MlTasks.DATAFEED_TASK_NAME, task -> true), hasSize(1)); assertThat(persistentTasks.findTasks(MlTasks.JOB_TASK_NAME, task -> true), hasSize(1)); @@ -74,7 +74,7 @@ public void testEnableUpgradeMode() throws Exception { masterClusterState = clusterAdmin().prepareState().all().get().getState(); // Assert state for tasks still exists and that the upgrade setting is set - persistentTasks = masterClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + persistentTasks = masterClusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertThat(persistentTasks.findTasks(MlTasks.DATAFEED_TASK_NAME, task -> true), hasSize(1)); assertThat(persistentTasks.findTasks(MlTasks.JOB_TASK_NAME, task -> true), hasSize(1)); @@ -101,7 +101,7 @@ public void testEnableUpgradeMode() throws Exception { masterClusterState = clusterAdmin().prepareState().all().get().getState(); - persistentTasks = masterClusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + persistentTasks = masterClusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); assertThat(persistentTasks.findTasks(MlTasks.DATAFEED_TASK_NAME, task -> true), hasSize(1)); assertThat(persistentTasks.findTasks(MlTasks.JOB_TASK_NAME, task -> true), hasSize(1)); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java index 592f42e13e301..165637910fffe 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/license/MachineLearningLicensingIT.java @@ -26,7 +26,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.license.License.OperationMode; import org.elasticsearch.license.internal.XPackLicenseStatus; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.RestStatus; 
import org.elasticsearch.search.aggregations.bucket.terms.TermsAggregationBuilder; import org.elasticsearch.search.aggregations.metrics.AvgAggregationBuilder; @@ -252,7 +252,7 @@ public void testAutoCloseJobWithDatafeed() throws Exception { assertEquals(DatafeedState.STOPPED, datafeedState); ClusterState state = clusterAdmin().prepareState().get().getState(); - List> tasks = findTasks(state, RELATED_TASKS); + List> tasks = findTasks(state, RELATED_TASKS); assertEquals(0, tasks.size()); }); @@ -277,7 +277,7 @@ public void testAutoCloseJobWithDatafeed() throws Exception { assertEquals(DatafeedState.STARTED, datafeedState); ClusterState state = clusterAdmin().prepareState().get().getState(); - List> tasks = findTasks(state, RELATED_TASKS); + List> tasks = findTasks(state, RELATED_TASKS); assertEquals(2, tasks.size()); }); @@ -297,7 +297,7 @@ public void testAutoCloseJobWithDatafeed() throws Exception { assertEquals(DatafeedState.STOPPED, datafeedState); ClusterState state = clusterAdmin().prepareState().get().getState(); - List> tasks = findTasks(state, RELATED_TASKS); + List> tasks = findTasks(state, RELATED_TASKS); assertEquals(0, tasks.size()); }); } @@ -337,7 +337,7 @@ public void testMachineLearningStartDatafeedActionRestricted() throws Exception JobState jobState = getJobStats(jobId).getState(); assertEquals(JobState.CLOSED, jobState); ClusterState state = clusterAdmin().prepareState().get().getState(); - List> tasks = findTasks(state, RELATED_TASKS); + List> tasks = findTasks(state, RELATED_TASKS); assertEquals(0, tasks.size()); }); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java index 6dbec53994b2e..7c83c1c971d85 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/BasicDistributedJobsIT.java @@ -16,8 +16,8 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.CheckedRunnable; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; @@ -231,7 +231,7 @@ public void testDedicatedMlNode() throws Exception { client().execute(OpenJobAction.INSTANCE, openJobRequest).actionGet(); assertBusy(() -> { ClusterState clusterState = clusterAdmin().prepareState().get().getState(); - PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); PersistentTask task = tasks.getTask(MlTasks.jobTaskId(jobId)); DiscoveryNode node = clusterState.nodes().resolveNode(task.getExecutorNode()); @@ -278,7 +278,7 @@ public void testMaxConcurrentJobAllocations() throws Exception { // Sample each cs update and keep track each time a node holds more than 
`maxConcurrentJobAllocations` opening jobs. List violations = new CopyOnWriteArrayList<>(); internalCluster().clusterService(nonMlNode).addListener(event -> { - PersistentTasksCustomMetadata tasks = event.state().metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = event.state().metadata().section(PersistentTasksMetadataSection.TYPE); if (tasks == null) { return; } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java index e29cd4545846c..86079791d49ad 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DataFrameAnalyticsConfigProviderIT.java @@ -15,7 +15,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -376,21 +376,21 @@ public void testUpdate_UpdateCannotBeAppliedWhenTaskIsRunning() throws Interrupt } private static ClusterState clusterStateWithRunningAnalyticsTask(String analyticsId, DataFrameAnalyticsState analyticsState) { - PersistentTasksCustomMetadata.Builder builder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder builder = PersistentTasksMetadataSection.builder(); builder.addTask( MlTasks.dataFrameAnalyticsTaskId(analyticsId), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new StartDataFrameAnalyticsAction.TaskParams(analyticsId, MlConfigVersion.CURRENT, false), - new PersistentTasksCustomMetadata.Assignment("node", "test assignment") + new PersistentTasksMetadataSection.Assignment("node", "test assignment") ); builder.updateTaskState( MlTasks.dataFrameAnalyticsTaskId(analyticsId), new DataFrameAnalyticsTaskState(analyticsState, builder.getLastAllocationId(), null, Instant.now()) ); - PersistentTasksCustomMetadata tasks = builder.build(); + PersistentTasksMetadataSection tasks = builder.build(); return ClusterState.builder(new ClusterName("cluster")) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasks).build()) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasks).build()) .build(); } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedConfigProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedConfigProviderIT.java index 1561520510c38..b19fbcb137080 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedConfigProviderIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/DatafeedConfigProviderIT.java @@ -13,7 +13,7 @@ import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.core.Tuple; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import 
org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.MlConfigIndex; @@ -391,15 +391,15 @@ public void testExpandDatafeedsWithTaskData() throws Exception { putDatafeedConfig(createDatafeedConfig("foo-2", "j2"), Collections.emptyMap()); client().admin().indices().prepareRefresh(MlConfigIndex.indexName()).get(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); tasksBuilder.addTask( MlTasks.datafeedTaskId("foo-1"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("foo-1", 0L), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); AtomicReference exceptionHolder = new AtomicReference<>(); AtomicReference> datafeedIdsHolder = new AtomicReference<>(); // Test datafeed IDs only diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobConfigProviderIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobConfigProviderIT.java index bca437dbf676c..5c07b5aa9f967 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobConfigProviderIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/JobConfigProviderIT.java @@ -12,7 +12,7 @@ import org.elasticsearch.action.DocWriteResponse; import org.elasticsearch.action.delete.DeleteResponse; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.AggregatorFactories; import org.elasticsearch.search.aggregations.bucket.histogram.HistogramAggregationBuilder; @@ -487,15 +487,15 @@ public void testExpandJobIdsWithTaskData() throws Exception { client().admin().indices().prepareRefresh(MlConfigIndex.indexName()).get(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); tasksBuilder.addTask( MlTasks.jobTaskId("foo-2"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("foo-2"), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); AtomicReference exceptionHolder = new AtomicReference<>(); AtomicReference> jobIdsHolder = new AtomicReference<>(); diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java index 33fd7c108863b..188692c8f7134 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java +++ 
b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/MlDistributedFailureIT.java @@ -27,7 +27,7 @@ import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.persistent.PersistentTaskResponse; import org.elasticsearch.persistent.PersistentTasksClusterService; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.UpdatePersistentTaskStatusAction; import org.elasticsearch.xcontent.ToXContent; import org.elasticsearch.xcontent.XContentBuilder; @@ -284,8 +284,8 @@ public void testCloseUnassignedFailedJobAndStopUnassignedStoppingDatafeed() thro // using externally accessible actions. The only way this situation could occur in reality is through extremely unfortunate // timing. Therefore, to simulate this unfortunate timing we cheat and access internal classes to set the datafeed state to // stopping. - PersistentTasksCustomMetadata tasks = clusterService().state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - PersistentTasksCustomMetadata.PersistentTask task = MlTasks.getDatafeedTask(datafeedId, tasks); + PersistentTasksMetadataSection tasks = clusterService().state().getMetadata().section(PersistentTasksMetadataSection.TYPE); + PersistentTasksMetadataSection.PersistentTask task = MlTasks.getDatafeedTask(datafeedId, tasks); // It is possible that the datafeed has already detected the job failure and // terminated itself. In this happens there is no persistent task to stop @@ -719,11 +719,11 @@ private void run(String jobId, CheckedRunnable disrupt) throws Except // in a Lucene index it can take a while to update when there are many updates in quick // succession, like we see in internal cluster tests of node failure scenarios awaitClusterState(state -> { - List> tasks = findTasks(state, Set.of(DATAFEED_TASK_NAME, JOB_TASK_NAME)); + List> tasks = findTasks(state, Set.of(DATAFEED_TASK_NAME, JOB_TASK_NAME)); if (tasks == null || tasks.size() != 2) { return false; } - for (PersistentTasksCustomMetadata.PersistentTask task : tasks) { + for (PersistentTasksMetadataSection.PersistentTask task : tasks) { if (needsReassignment(task.getAssignment(), state.nodes())) { return false; } diff --git a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java index b62a524245d88..6d74f0cb624ce 100644 --- a/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java +++ b/x-pack/plugin/ml/src/internalClusterTest/java/org/elasticsearch/xpack/ml/integration/TooManyJobsIT.java @@ -14,7 +14,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.CloseJobAction; @@ -64,10 +64,10 @@ public void testCloseFailedJob() throws Exception { ).actionGet(); assertEquals(statsResponse.getResponse().results().get(0).getState(), JobState.CLOSED); ClusterState state = clusterAdmin().prepareState().get().getState(); - List> tasks = findTasks(state, MlTasks.JOB_TASK_NAME); + List> 
tasks = findTasks(state, MlTasks.JOB_TASK_NAME); assertEquals(1, tasks.size()); // now just double check that the first job is still opened: - PersistentTasksCustomMetadata.PersistentTask task = tasks.get(0); + PersistentTasksMetadataSection.PersistentTask task = tasks.get(0); assertEquals(task.getId(), MlTasks.jobTaskId("close-failed-job-1")); assertEquals(JobState.OPENED, ((JobTaskState) task.getState()).getState()); } @@ -213,13 +213,13 @@ private void verifyMaxNumberOfJobsLimit(int numNodes, int maxNumberOfJobsPerNode client().execute(OpenJobAction.INSTANCE, openJobRequest).actionGet(); assertBusy(() -> { for (Client client : clients()) { - PersistentTasksCustomMetadata tasks = client.admin() + PersistentTasksMetadataSection tasks = client.admin() .cluster() .prepareState() .get() .getState() .getMetadata() - .custom(PersistentTasksCustomMetadata.TYPE); + .section(PersistentTasksMetadataSection.TYPE); assertEquals(MlTasks.getJobState(job.getId(), tasks), JobState.OPENED); } }); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java index c4bf92401be9d..eff0052157c95 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MachineLearning.java @@ -24,7 +24,7 @@ import org.elasticsearch.cluster.NamedDiff; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; import org.elasticsearch.cluster.metadata.IndexTemplateMetadata; -import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.metadata.SingleNodeShutdownMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; @@ -1835,21 +1835,21 @@ public List getNamedXContent() { namedXContent.addAll(new MlModelSizeNamedXContentProvider().getNamedXContentParsers()); namedXContent.add( new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField((TrainedModelCacheMetadata.NAME)), TrainedModelCacheMetadata::fromXContent ) ); namedXContent.add( new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(ModelAliasMetadata.NAME), ModelAliasMetadata::fromXContent ) ); namedXContent.add( new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(TrainedModelAssignmentMetadata.NAME), TrainedModelAssignmentMetadata::fromXContent ) @@ -1858,7 +1858,7 @@ public List getNamedXContent() { // has no control over this. 
So, simply read it without logging a deprecation warning namedXContent.add( new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(TrainedModelAssignmentMetadata.DEPRECATED_NAME), TrainedModelAssignmentMetadata::fromXContent ) @@ -1876,19 +1876,19 @@ public List getNamedWriteables() { List namedWriteables = new ArrayList<>(); // Custom metadata - namedWriteables.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, "ml", MlMetadata::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(MetadataSection.class, "ml", MlMetadata::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(NamedDiff.class, "ml", MlMetadata.MlMetadataDiff::new)); namedWriteables.add( - new NamedWriteableRegistry.Entry(Metadata.Custom.class, TrainedModelCacheMetadata.NAME, TrainedModelCacheMetadata::new) + new NamedWriteableRegistry.Entry(MetadataSection.class, TrainedModelCacheMetadata.NAME, TrainedModelCacheMetadata::new) ); namedWriteables.add( new NamedWriteableRegistry.Entry(NamedDiff.class, TrainedModelCacheMetadata.NAME, TrainedModelCacheMetadata::readDiffFrom) ); - namedWriteables.add(new NamedWriteableRegistry.Entry(Metadata.Custom.class, ModelAliasMetadata.NAME, ModelAliasMetadata::new)); + namedWriteables.add(new NamedWriteableRegistry.Entry(MetadataSection.class, ModelAliasMetadata.NAME, ModelAliasMetadata::new)); namedWriteables.add(new NamedWriteableRegistry.Entry(NamedDiff.class, ModelAliasMetadata.NAME, ModelAliasMetadata::readDiffFrom)); namedWriteables.add( new NamedWriteableRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata::fromStream ) @@ -1902,7 +1902,7 @@ public List getNamedWriteables() { ); namedWriteables.add( new NamedWriteableRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, TrainedModelAssignmentMetadata.DEPRECATED_NAME, TrainedModelAssignmentMetadata::fromStreamOld ) diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java index f3774c8068489..6f05923cf44a6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlAssignmentNotifier.java @@ -15,9 +15,9 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -103,8 +103,8 @@ public void clusterChanged(ClusterChangedEvent event) { private void auditChangesToMlTasks(ClusterChangedEvent event) { - PersistentTasksCustomMetadata previousTasks = event.previousState().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - PersistentTasksCustomMetadata currentTasks = event.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + 
PersistentTasksMetadataSection previousTasks = event.previousState().getMetadata().section(PersistentTasksMetadataSection.TYPE); + PersistentTasksMetadataSection currentTasks = event.state().getMetadata().section(PersistentTasksMetadataSection.TYPE); if (Objects.equals(previousTasks, currentTasks)) { return; @@ -118,15 +118,15 @@ private void auditChangesToMlTasks(ClusterChangedEvent event) { * tasks, even if a previous audit warning has been created. * Care must be taken not to call this method frequently. */ - public void auditUnassignedMlTasks(DiscoveryNodes nodes, PersistentTasksCustomMetadata tasks) { + public void auditUnassignedMlTasks(DiscoveryNodes nodes, PersistentTasksMetadataSection tasks) { auditMlTasks(nodes, nodes, tasks, tasks, true); } private void auditMlTasks( DiscoveryNodes previousNodes, DiscoveryNodes currentNodes, - PersistentTasksCustomMetadata previousTasks, - PersistentTasksCustomMetadata currentTasks, + PersistentTasksMetadataSection previousTasks, + PersistentTasksMetadataSection currentTasks, boolean alwaysAuditUnassigned ) { if (currentTasks == null) { @@ -262,7 +262,7 @@ static String nodeName(DiscoveryNodes nodes, String nodeId) { } private void logLongTimeUnassigned(Instant now, ClusterState state) { - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); if (tasks == null) { return; } @@ -287,7 +287,7 @@ private void logLongTimeUnassigned(Instant now, ClusterState state) { * the data structure used to record the information is in memory on the current master node, * not in cluster state. */ - synchronized List findLongTimeUnassignedTasks(Instant now, PersistentTasksCustomMetadata tasks) { + synchronized List findLongTimeUnassignedTasks(Instant now, PersistentTasksMetadataSection tasks) { assert tasks != null; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java index 47f0fde838b8e..c1b5f9d8a4196 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceService.java @@ -28,7 +28,7 @@ import org.elasticsearch.core.Releasable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; @@ -394,7 +394,7 @@ private void triggerJobsInStateWithoutMatchingTask( */ private void auditUnassignedMlTasks() { ClusterState state = clusterService.state(); - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); if (tasks != null) { mlAssignmentNotifier.auditUnassignedMlTasks(state.nodes(), tasks); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java index 976e5ec255b85..847e6f061f769 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlLifeCycleService.java @@ -11,7 +11,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.component.LifecycleListener; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; @@ -114,7 +114,7 @@ static boolean isNodeSafeToShutdown(String nodeId, ClusterState state, Instant s logger.debug(() -> format("Node id [%s] has running deployments: %s", nodeId, nodeHasRunningDeployments)); - PersistentTasksCustomMetadata tasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.metadata().section(PersistentTasksMetadataSection.TYPE); // Ignore failed jobs - the persistent task still exists to remember the failure (because no // persistent task means closed), but these don't need to be relocated to another node. return MlTasks.nonFailedJobTasksOnNode(tasks, nodeId).isEmpty() diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlMetrics.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlMetrics.java index 4fd1af3cfa2bc..ed3f29c210eda 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlMetrics.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/MlMetrics.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.gateway.GatewayService; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.telemetry.metric.LongWithAttributes; import org.elasticsearch.telemetry.metric.MeterRegistry; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -319,10 +319,10 @@ public void clusterChanged(ClusterChangedEvent event) { final ClusterState previousState = event.previousState(); if (firstTime || event.metadataChanged()) { - final PersistentTasksCustomMetadata tasks = currentState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - final PersistentTasksCustomMetadata oldTasks = firstTime + final PersistentTasksMetadataSection tasks = currentState.getMetadata().section(PersistentTasksMetadataSection.TYPE); + final PersistentTasksMetadataSection oldTasks = firstTime ? null - : previousState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + : previousState.getMetadata().section(PersistentTasksMetadataSection.TYPE); if (tasks != null && tasks.equals(oldTasks) == false) { if (hasMasterRole) { mlTaskStatusCounts = findTaskStatuses(tasks); @@ -390,7 +390,7 @@ private void memoryLimitClusterSettingUpdated() { *

* The caller is expected to cache the returned stats to avoid unnecessary recalculation. */ - static MlTaskStatusCounts findTaskStatuses(PersistentTasksCustomMetadata tasks) { + static MlTaskStatusCounts findTaskStatuses(PersistentTasksMetadataSection tasks) { int adOpeningCount = 0; int adOpenedCount = 0; @@ -406,7 +406,7 @@ static MlTaskStatusCounts findTaskStatuses(PersistentTasksCustomMetadata tasks) int dfaStoppingCount = 0; int dfaFailedCount = 0; - for (PersistentTasksCustomMetadata.PersistentTask task : tasks.tasks()) { + for (PersistentTasksMetadataSection.PersistentTask task : tasks.tasks()) { switch (task.getTaskName()) { case JOB_TASK_NAME: switch (MlTasks.getJobStateModifiedForReassignments(task)) { @@ -469,7 +469,7 @@ static long findAdMemoryUsage(AutodetectProcessManager autodetectProcessManager) * Return the memory usage, in bytes, of the data frame analytics jobs that are running on the * current node. */ - static long findDfaMemoryUsage(DataFrameAnalyticsManager dataFrameAnalyticsManager, PersistentTasksCustomMetadata tasks) { + static long findDfaMemoryUsage(DataFrameAnalyticsManager dataFrameAnalyticsManager, PersistentTasksMetadataSection tasks) { return dataFrameAnalyticsManager.getActiveTaskMemoryUsage(tasks).getBytes(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java index c4820112211b0..481fc7e988bf2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCancelJobModelSnapshotUpgradeAction.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.util.concurrent.AtomicArray; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -70,13 +70,13 @@ public void doExecute(Task task, Request request, ActionListener liste ActionListener> expandIdsListener = listener.delegateFailureAndWrap((delegate, jobs) -> { SimpleIdsMatcher matcher = new SimpleIdsMatcher(request.getSnapshotId()); Set jobIds = jobs.stream().map(Job.Builder::getId).collect(Collectors.toSet()); - PersistentTasksCustomMetadata tasksInProgress = clusterService.state().metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasksInProgress = clusterService.state().metadata().section(PersistentTasksMetadataSection.TYPE); // allow_no_match plays no part here. The reason is that we have a principle that stopping // a stopped entity is a no-op, and upgrades that have already completed won't have a task. // This is a bit different to jobs and datafeeds, where the entity continues to exist even // after it's stopped. Upgrades cease to exist after they're stopped so the match validation // cannot be as thorough. 
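/*
 * Illustrative sketch (not part of this patch): the iteration pattern behind
 * MlMetrics#findTaskStatuses above, reduced to counting persistent tasks per task name.
 * It assumes the renamed section still exposes tasks() and getTaskName() as used in the
 * hunks above; TaskCountSketch is a hypothetical holder class.
 */
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.persistent.PersistentTasksMetadataSection;

class TaskCountSketch {

    /** Counts persistent tasks by task name, or returns an empty map when the section is absent. */
    static Map<String, Integer> countTasksByName(ClusterState state) {
        PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE);
        Map<String, Integer> counts = new HashMap<>();
        if (tasks == null) {
            return counts; // no persistent tasks registered yet
        }
        for (PersistentTasksMetadataSection.PersistentTask<?> task : tasks.tasks()) {
            counts.merge(task.getTaskName(), 1, Integer::sum);
        }
        return counts;
    }
}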
- List> upgradeTasksToCancel = MlTasks.snapshotUpgradeTasks(tasksInProgress) + List> upgradeTasksToCancel = MlTasks.snapshotUpgradeTasks(tasksInProgress) .stream() .filter(t -> jobIds.contains(((SnapshotUpgradeTaskParams) t.getParams()).getJobId())) .filter(t -> matcher.idMatches(((SnapshotUpgradeTaskParams) t.getParams()).getSnapshotId())) @@ -90,7 +90,7 @@ public void doExecute(Task task, Request request, ActionListener liste private void removePersistentTasks( Request request, - List> upgradeTasksToCancel, + List> upgradeTasksToCancel, ActionListener listener ) { final int numberOfTasks = upgradeTasksToCancel.size(); @@ -102,10 +102,10 @@ private void removePersistentTasks( final AtomicInteger counter = new AtomicInteger(); final AtomicArray failures = new AtomicArray<>(numberOfTasks); - for (PersistentTasksCustomMetadata.PersistentTask task : upgradeTasksToCancel) { + for (PersistentTasksMetadataSection.PersistentTask task : upgradeTasksToCancel) { persistentTasksService.sendRemoveRequest(task.getId(), null, new ActionListener<>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask task) { if (counter.incrementAndGet() == numberOfTasks) { sendResponseOrFailure(listener, failures); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java index 306098f38bc08..42516b5e89e2d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportCloseJobAction.java @@ -28,7 +28,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -149,7 +149,7 @@ protected void doExecute(Task task, CloseJobAction.Request request, ActionListen final boolean isForce = request.isForce(); final TimeValue timeout = request.getCloseTimeout(); - PersistentTasksCustomMetadata tasksMetadata = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasksMetadata = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); jobConfigProvider.expandJobsIds( request.getJobId(), request.allowNoMatch(), @@ -181,9 +181,10 @@ protected void doExecute(Task task, CloseJobAction.Request request, ActionListen forceCloseJob(state, request, jobIdsToForceClose, delegate3); } else { Set executorNodes = new HashSet<>(); - PersistentTasksCustomMetadata tasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.metadata() + .section(PersistentTasksMetadataSection.TYPE); for (String resolvedJobId : request.getOpenJobIds()) { - PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask( + PersistentTasksMetadataSection.PersistentTask jobTask = MlTasks.getJobTask( resolvedJobId, tasks ); @@ -258,7 +259,7 @@ static class OpenAndClosingIds { void validate( Collection expandedJobIds, boolean forceClose, - PersistentTasksCustomMetadata tasksMetadata, + 
PersistentTasksMetadataSection tasksMetadata, ActionListener listener ) { @@ -292,7 +293,7 @@ void stopDatafeedsIfNecessary( OpenAndClosingIds jobIds, boolean isForce, TimeValue timeout, - PersistentTasksCustomMetadata tasksMetadata, + PersistentTasksMetadataSection tasksMetadata, ActionListener listener ) { datafeedConfigProvider.findDatafeedIdsForJobIds(jobIds.openJobIds, listener.delegateFailureAndWrap((delegate, datafeedIds) -> { @@ -384,7 +385,7 @@ void isolateDatafeeds(List openJobs, List runningDatafeedIds, Ac static void addJobAccordingToState( String jobId, - PersistentTasksCustomMetadata tasksMetadata, + PersistentTasksMetadataSection tasksMetadata, List openJobs, List closingJobs, List failedJobs @@ -401,13 +402,13 @@ static void addJobAccordingToState( static TransportCloseJobAction.WaitForCloseRequest buildWaitForCloseRequest( List openJobIds, List closingJobIds, - PersistentTasksCustomMetadata tasks, + PersistentTasksMetadataSection tasks, AnomalyDetectionAuditor auditor ) { TransportCloseJobAction.WaitForCloseRequest waitForCloseRequest = new TransportCloseJobAction.WaitForCloseRequest(); for (String jobId : openJobIds) { - PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); + PersistentTasksMetadataSection.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); if (jobTask != null) { auditor.info(jobId, Messages.JOB_AUDIT_CLOSING); waitForCloseRequest.persistentTasks.add(jobTask); @@ -415,7 +416,7 @@ static TransportCloseJobAction.WaitForCloseRequest buildWaitForCloseRequest( } } for (String jobId : closingJobIds) { - PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); + PersistentTasksMetadataSection.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); if (jobTask != null) { waitForCloseRequest.persistentTasks.add(jobTask); } @@ -507,19 +508,19 @@ private void forceCloseJob( List jobIdsToForceClose, ActionListener listener ) { - PersistentTasksCustomMetadata tasks = currentState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = currentState.getMetadata().section(PersistentTasksMetadataSection.TYPE); final int numberOfJobs = jobIdsToForceClose.size(); final AtomicInteger counter = new AtomicInteger(); final AtomicArray failures = new AtomicArray<>(numberOfJobs); for (String jobId : jobIdsToForceClose) { - PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); + PersistentTasksMetadataSection.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); if (jobTask != null) { auditor.info(jobId, Messages.JOB_AUDIT_FORCE_CLOSING); persistentTasksService.sendRemoveRequest(jobTask.getId(), null, new ActionListener<>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask task) { if (counter.incrementAndGet() == numberOfJobs) { sendResponseOrFailure(request.getJobId(), listener, failures); } @@ -571,7 +572,7 @@ private void normalCloseJob( List closingJobIds, ActionListener listener ) { - PersistentTasksCustomMetadata tasks = currentState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = currentState.getMetadata().section(PersistentTasksMetadataSection.TYPE); WaitForCloseRequest waitForCloseRequest = buildWaitForCloseRequest(openJobIds, closingJobIds, tasks, auditor); @@ -585,7 +586,7 @@ private void normalCloseJob( ActionListener intermediateListener = 
listener.delegateFailureAndWrap((delegate, response) -> { for (String jobId : movedJobs) { - PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); + PersistentTasksMetadataSection.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); persistentTasksService.sendRemoveRequest( jobTask.getId(), null, @@ -615,7 +616,7 @@ private void normalCloseJob( } static class WaitForCloseRequest { - final List> persistentTasks = new ArrayList<>(); + final List> persistentTasks = new ArrayList<>(); final List jobsToFinalize = new ArrayList<>(); public boolean hasJobsToWaitFor() { @@ -640,9 +641,9 @@ void waitForJobClosed( Set movedJobs ) { persistentTasksService.waitForPersistentTasksCondition(persistentTasksCustomMetadata -> { - for (PersistentTasksCustomMetadata.PersistentTask originalPersistentTask : waitForCloseRequest.persistentTasks) { + for (PersistentTasksMetadataSection.PersistentTask originalPersistentTask : waitForCloseRequest.persistentTasks) { String originalPersistentTaskId = originalPersistentTask.getId(); - PersistentTasksCustomMetadata.PersistentTask currentPersistentTask = persistentTasksCustomMetadata.getTask( + PersistentTasksMetadataSection.PersistentTask currentPersistentTask = persistentTasksCustomMetadata.getTask( originalPersistentTaskId ); if (currentPersistentTask != null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java index a4ff765c7debf..f4474f21346fd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDataFrameAnalyticsAction.java @@ -21,7 +21,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -155,7 +155,7 @@ private void normalDelete( ActionListener listener ) { String id = request.getId(); - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); DataFrameAnalyticsState taskState = MlTasks.getDataFrameAnalyticsState(id, tasks); if (taskState != DataFrameAnalyticsState.STOPPED) { listener.onFailure( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java index 4fe24bbf468e2..de194f7067337 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteDatafeedAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import 
org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -98,14 +98,14 @@ private void forceDeleteDatafeed( } private void removeDatafeedTask(DeleteDatafeedAction.Request request, ClusterState state, ActionListener listener) { - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - PersistentTasksCustomMetadata.PersistentTask datafeedTask = MlTasks.getDatafeedTask(request.getDatafeedId(), tasks); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); + PersistentTasksMetadataSection.PersistentTask datafeedTask = MlTasks.getDatafeedTask(request.getDatafeedId(), tasks); if (datafeedTask == null) { listener.onResponse(true); } else { persistentTasksService.sendRemoveRequest(datafeedTask.getId(), null, new ActionListener<>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask persistentTask) { listener.onResponse(Boolean.TRUE); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java index f25f36581d6fe..5dc436959504d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteForecastAction.java @@ -34,7 +34,7 @@ import org.elasticsearch.index.reindex.DeleteByQueryRequest; import org.elasticsearch.index.reindex.ScrollableHitSource; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.SearchHits; @@ -158,7 +158,7 @@ private void deleteForecasts( return; } final ClusterState state = clusterService.state(); - PersistentTasksCustomMetadata persistentTasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = state.metadata().section(PersistentTasksMetadataSection.TYPE); JobState jobState = MlTasks.getJobState(jobId, persistentTasks); final List forecastIds; try { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java index 31aaf157d66ad..30adee65393e2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteJobAction.java @@ -26,7 +26,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.Nullable; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -189,7 +189,7 @@ protected void masterOperation( response -> deleteDatafeedIfNecessary(request, 
datafeedDeleteListener), e -> { if (request.isForce() - && MlTasks.getJobTask(request.getJobId(), state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE)) != null) { + && MlTasks.getJobTask(request.getJobId(), state.getMetadata().section(PersistentTasksMetadataSection.TYPE)) != null) { logger.info("[{}] config is missing but task exists. Attempting to delete tasks and stop process", request.getJobId()); forceDeleteJob(parentTaskClient, request, state, finalListener); } else { @@ -285,9 +285,9 @@ private static void killProcess( } private void removePersistentTask(String jobId, ClusterState currentState, ActionListener listener) { - PersistentTasksCustomMetadata tasks = currentState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = currentState.getMetadata().section(PersistentTasksMetadataSection.TYPE); - PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); + PersistentTasksMetadataSection.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); if (jobTask == null) { listener.onResponse(null); } else { @@ -296,8 +296,8 @@ private void removePersistentTask(String jobId, ClusterState currentState, Actio } private static void checkJobIsNotOpen(String jobId, ClusterState state) { - PersistentTasksCustomMetadata tasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); - PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); + PersistentTasksMetadataSection tasks = state.metadata().section(PersistentTasksMetadataSection.TYPE); + PersistentTasksMetadataSection.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); if (jobTask != null) { JobTaskState jobTaskState = (JobTaskState) jobTask.getState(); throw ExceptionsHelper.conflictStatusException( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java index fe7eec7623ac1..8907a86db4709 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAction.java @@ -151,7 +151,7 @@ static List getModelAliases(ClusterState clusterState, String modelId) { private void deleteModel(DeleteTrainedModelAction.Request request, ClusterState state, ActionListener listener) { String id = request.getId(); - IngestMetadata currentIngestMetadata = state.metadata().custom(IngestMetadata.TYPE); + IngestMetadata currentIngestMetadata = state.metadata().section(IngestMetadata.TYPE); Set referencedModels = InferenceProcessorInfoExtractor.getModelIdsFromInferenceProcessors(currentIngestMetadata); if (request.isForce() == false && referencedModels.contains(id)) { @@ -241,7 +241,7 @@ public ClusterState execute(final ClusterState currentState) { modelAliases.forEach(newMetadata::remove); final ModelAliasMetadata modelAliasMetadata = new ModelAliasMetadata(newMetadata); builder.metadata( - Metadata.builder(currentState.getMetadata()).putCustom(ModelAliasMetadata.NAME, modelAliasMetadata).build() + Metadata.builder(currentState.getMetadata()).putSection(ModelAliasMetadata.NAME, modelAliasMetadata).build() ); return builder.build(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java 
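/*
 * Illustrative sketch (not part of this patch): the write side of the putCustom -> putSection
 * rename, mirroring the ModelAliasMetadata updates above and the removeSection usage later in
 * this patch. MetadataSection is the binding type introduced by the rename; its import path and
 * the PutSectionSketch class are assumptions for illustration only.
 */
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.metadata.MetadataSection;

class PutSectionSketch {

    /** Rebuilds the cluster state so its metadata carries {@code section} under {@code name}. */
    static ClusterState withSection(ClusterState currentState, String name, MetadataSection section) {
        ClusterState.Builder builder = ClusterState.builder(currentState);
        builder.metadata(Metadata.builder(currentState.getMetadata()).putSection(name, section).build());
        return builder.build();
    }

    /** Rebuilds the cluster state with the named section dropped, as TransportSetResetModeAction does. */
    static ClusterState withoutSection(ClusterState currentState, String name) {
        ClusterState.Builder builder = ClusterState.builder(currentState);
        builder.metadata(Metadata.builder(currentState.getMetadata()).removeSection(name).build());
        return builder.build();
    }
}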
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java index 643ceaae442d6..b98ce42ed1369 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportDeleteTrainedModelAliasAction.java @@ -111,7 +111,7 @@ static ClusterState deleteModelAlias( request.getModelId() ); } - IngestMetadata currentIngestMetadata = currentState.metadata().custom(IngestMetadata.TYPE); + IngestMetadata currentIngestMetadata = currentState.metadata().section(IngestMetadata.TYPE); Set referencedModels = InferenceProcessorInfoExtractor.getModelIdsFromInferenceProcessors(currentIngestMetadata); if (referencedModels.contains(request.getModelAlias())) { throw new ElasticsearchStatusException( @@ -127,7 +127,7 @@ static ClusterState deleteModelAlias( newMetadata.remove(request.getModelAlias()); final ModelAliasMetadata modelAliasMetadata = new ModelAliasMetadata(newMetadata); - builder.metadata(Metadata.builder(currentState.getMetadata()).putCustom(ModelAliasMetadata.NAME, modelAliasMetadata).build()); + builder.metadata(Metadata.builder(currentState.getMetadata()).putSection(ModelAliasMetadata.NAME, modelAliasMetadata).build()); return builder.build(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java index 267f67d3155e9..afdf5390c4366 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDataFrameAnalyticsStatsAction.java @@ -26,7 +26,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortBuilders; import org.elasticsearch.search.sort.SortOrder; @@ -363,8 +363,8 @@ private GetDataFrameAnalyticsStatsAction.Response.Stats buildStats( AnalysisStats analysisStats ) { ClusterState clusterState = clusterService.state(); - PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - PersistentTasksCustomMetadata.PersistentTask analyticsTask = MlTasks.getDataFrameAnalyticsTask(concreteAnalyticsId, tasks); + PersistentTasksMetadataSection tasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); + PersistentTasksMetadataSection.PersistentTask analyticsTask = MlTasks.getDataFrameAnalyticsTask(concreteAnalyticsId, tasks); DataFrameAnalyticsState analyticsState = MlTasks.getDataFrameAnalyticsState(concreteAnalyticsId, tasks); String failureReason = null; if (analyticsState == DataFrameAnalyticsState.FAILED) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedRunningStateAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedRunningStateAction.java index b62f03cfa20ce..0f9326e33d4f7 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedRunningStateAction.java +++ 
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedRunningStateAction.java @@ -16,7 +16,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -89,12 +89,12 @@ protected void taskOperation( @Override protected void doExecute(Task task, Request request, ActionListener listener) { DiscoveryNodes nodes = clusterService.state().nodes(); - PersistentTasksCustomMetadata tasks = clusterService.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterService.state().getMetadata().section(PersistentTasksMetadataSection.TYPE); if (tasks == null) { listener.onResponse(new Response(Collections.emptyMap())); return; } - final List> datafeedTasks = request.getDatafeedTaskIds() + final List> datafeedTasks = request.getDatafeedTaskIds() .stream() .map(tasks::getTask) .filter(Objects::nonNull) @@ -131,7 +131,7 @@ protected void doExecute(Task task, Request request, ActionListener li }, listener::onFailure); String[] nodesOfConcern = datafeedTasks.stream() - .map(PersistentTasksCustomMetadata.PersistentTask::getExecutorNode) + .map(PersistentTasksMetadataSection.PersistentTask::getExecutorNode) .filter(Objects::nonNull) .filter(nodes::nodeExists) .toArray(String[]::new); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java index 61e9a3f24894e..ce59b4ded21a1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetDatafeedsStatsAction.java @@ -17,7 +17,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.transport.TransportService; @@ -67,7 +67,7 @@ public TransportGetDatafeedsStatsAction( protected void doExecute(Task task, Request request, ActionListener listener) { logger.trace(() -> "[" + request.getDatafeedId() + "] get stats for datafeed"); ClusterState state = clusterService.state(); - final PersistentTasksCustomMetadata tasksInProgress = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + final PersistentTasksMetadataSection tasksInProgress = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); final Response.Builder responseBuilder = new Response.Builder(); final TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobModelSnapshotsUpgradeStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobModelSnapshotsUpgradeStatsAction.java index 997986d8ce76d..5ced29abe1d09 100644 --- 
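/*
 * Illustrative sketch (not part of this patch): the id -> task -> executor-node projection that
 * TransportGetDatafeedRunningStateAction uses above, pulled out as a helper. Assumes the renamed
 * section keeps getTask(String) and that getExecutorNode() is null while a task is unassigned;
 * DatafeedNodesSketch is a hypothetical holder class.
 */
import java.util.List;
import java.util.Objects;

import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.persistent.PersistentTasksMetadataSection;

class DatafeedNodesSketch {

    /** Maps persistent-task ids to the nodes executing them, skipping unknown or unassigned tasks. */
    static String[] executorNodes(List<String> taskIds, PersistentTasksMetadataSection tasks, DiscoveryNodes nodes) {
        return taskIds.stream()
            .map(tasks::getTask)
            .filter(Objects::nonNull)
            .map(PersistentTasksMetadataSection.PersistentTask::getExecutorNode)
            .filter(Objects::nonNull)
            .filter(nodes::nodeExists)
            .toArray(String[]::new);
    }
}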
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobModelSnapshotsUpgradeStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobModelSnapshotsUpgradeStatsAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -75,8 +75,8 @@ public TransportGetJobModelSnapshotsUpgradeStatsAction( @Override protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) { logger.debug(() -> format("[%s] get stats for model snapshot [%s] upgrades", request.getJobId(), request.getSnapshotId())); - final PersistentTasksCustomMetadata tasksInProgress = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - final Collection> snapshotUpgrades = MlTasks.snapshotUpgradeTasks(tasksInProgress); + final PersistentTasksMetadataSection tasksInProgress = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); + final Collection> snapshotUpgrades = MlTasks.snapshotUpgradeTasks(tasksInProgress); final TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); // 2. Now that we have the job IDs, find the relevant model snapshot upgrades diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java index 12e5a1b32e7d8..7d97ff7610241 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportGetJobsStatsAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -96,7 +96,7 @@ protected void doExecute(Task task, GetJobsStatsAction.Request request, ActionLi TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), task.getId()); ClusterState state = clusterService.state(); - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); // If there are deleted configs, but the task is still around, we probably want to return the tasks in the stats call jobConfigProvider.expandJobsIds( request.getJobId(), @@ -144,13 +144,13 @@ protected void taskOperation( TaskId parentTaskId = new TaskId(clusterService.localNode().getId(), actionTask.getId()); String jobId = task.getJobId(); ClusterState state = clusterService.state(); - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); Optional>> stats = processManager.getStatistics(task); if 
(stats.isPresent()) { DataCounts dataCounts = stats.get().v1(); ModelSizeStats modelSizeStats = stats.get().v2().v1(); TimingStats timingStats = stats.get().v2().v2(); - PersistentTasksCustomMetadata.PersistentTask pTask = MlTasks.getJobTask(jobId, tasks); + PersistentTasksMetadataSection.PersistentTask pTask = MlTasks.getJobTask(jobId, tasks); DiscoveryNode node = state.nodes().get(pTask.getExecutorNode()); JobState jobState = MlTasks.getJobState(jobId, tasks); String assignmentExplanation = pTask.getAssignment().getExplanation(); @@ -201,7 +201,7 @@ void gatherStatsForClosedJobs( } }; - PersistentTasksCustomMetadata tasks = clusterService.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterService.state().getMetadata().section(PersistentTasksMetadataSection.TYPE); threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME).execute(() -> { for (int i = 0; i < closedJobIds.size(); i++) { int slot = i; @@ -216,7 +216,7 @@ void gatherStatsForClosedJobs( parentTaskId, (dataCounts, modelSizeStats, timingStats) -> { JobState jobState = MlTasks.getJobState(jobId, tasks); - PersistentTasksCustomMetadata.PersistentTask pTask = MlTasks.getJobTask(jobId, tasks); + PersistentTasksMetadataSection.PersistentTask pTask = MlTasks.getJobTask(jobId, tasks); String assignmentExplanation = null; if (pTask != null) { assignmentExplanation = pTask.getAssignment().getExplanation(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java index ba857b9d3946f..48b49f2a11917 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportIsolateDatafeedAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -47,8 +47,8 @@ public TransportIsolateDatafeedAction(TransportService transportService, ActionF @Override protected void doExecute(Task task, IsolateDatafeedAction.Request request, ActionListener listener) { final ClusterState state = clusterService.state(); - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - PersistentTasksCustomMetadata.PersistentTask datafeedTask = MlTasks.getDatafeedTask(request.getDatafeedId(), tasks); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); + PersistentTasksMetadataSection.PersistentTask datafeedTask = MlTasks.getDatafeedTask(request.getDatafeedId(), tasks); if (datafeedTask == null || datafeedTask.getExecutorNode() == null) { // No running datafeed task to isolate diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java index 9678f007e9397..56c16d6ceda9c 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java +++ 
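/*
 * Illustrative sketch (not part of this patch): how the job stats code above resolves the node an
 * open job runs on once the persistent-tasks section has been read. Mirrors
 * TransportGetJobsStatsAction; JobNodeSketch is a hypothetical holder class, and the helper
 * assumes MlTasks.getJobTask tolerates a null section as the surrounding code does.
 */
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.persistent.PersistentTasksMetadataSection;
import org.elasticsearch.xpack.core.ml.MlTasks;

class JobNodeSketch {

    /** Returns the node the job's persistent task is assigned to, or null if the job is not open or unassigned. */
    static DiscoveryNode nodeRunningJob(ClusterState state, String jobId) {
        PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE);
        PersistentTasksMetadataSection.PersistentTask<?> jobTask = MlTasks.getJobTask(jobId, tasks);
        if (jobTask == null || jobTask.getExecutorNode() == null) {
            return null;
        }
        return state.nodes().get(jobTask.getExecutorNode());
    }
}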
b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportJobTaskAction.java @@ -14,7 +14,7 @@ import org.elasticsearch.action.support.tasks.TransportTasksAction; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.Writeable; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -55,8 +55,8 @@ protected void doExecute(Task task, Request request, ActionListener li String jobId = request.getJobId(); // We need to check whether there is at least an assigned task here, otherwise we cannot redirect to the // node running the job task. - PersistentTasksCustomMetadata tasks = clusterService.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); + PersistentTasksMetadataSection tasks = clusterService.state().getMetadata().section(PersistentTasksMetadataSection.TYPE); + PersistentTasksMetadataSection.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); if (jobTask == null || jobTask.isAssigned() == false) { String message = "Cannot perform requested action because job [" + jobId + "] is not open"; listener.onFailure(ExceptionsHelper.conflictStatusException(message)); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java index 2ef7aeec7748b..992f33a51b477 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportKillProcessAction.java @@ -17,7 +17,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.Strings; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportService; @@ -96,13 +96,13 @@ protected void taskOperation( @Override protected void doExecute(Task task, KillProcessAction.Request request, ActionListener listener) { DiscoveryNodes nodes = clusterService.state().nodes(); - PersistentTasksCustomMetadata tasks = clusterService.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - List> jobTasks; + PersistentTasksMetadataSection tasks = clusterService.state().getMetadata().section(PersistentTasksMetadataSection.TYPE); + List> jobTasks; if (Strings.isAllOrWildcard(request.getJobId())) { jobTasks = MlTasks.openJobTasks(tasks).stream().filter(t -> t.getExecutorNode() != null).collect(Collectors.toList()); } else { - PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(request.getJobId(), tasks); + PersistentTasksMetadataSection.PersistentTask jobTask = MlTasks.getJobTask(request.getJobId(), tasks); if (jobTask == null || jobTask.getExecutorNode() == null) { jobTasks = Collections.emptyList(); } else { @@ -128,7 +128,7 @@ protected void doExecute(Task task, KillProcessAction.Request request, ActionLis request.setNodes( jobTasks.stream() .filter(t -> t.getExecutorNode() != null && 
nodes.get(t.getExecutorNode()) != null) - .map(PersistentTasksCustomMetadata.PersistentTask::getExecutorNode) + .map(PersistentTasksMetadataSection.PersistentTask::getExecutorNode) .toArray(String[]::new) ); super.doExecute(task, request, listener); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlMemoryAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlMemoryAction.java index cefc1f9ad02fa..d226e4ec8f0d1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlMemoryAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportMlMemoryAction.java @@ -28,7 +28,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.monitor.os.OsStats; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.transport.TransportService; @@ -129,7 +129,7 @@ protected void masterOperation( if (memoryTracker.isEverRefreshed()) { memoryTrackerRefreshListener.onResponse(null); } else { - memoryTracker.refresh(state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE), memoryTrackerRefreshListener); + memoryTracker.refresh(state.getMetadata().section(PersistentTasksMetadataSection.TYPE), memoryTrackerRefreshListener); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java index b220052baff0d..7af84f1e49948 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportOpenJobAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; @@ -139,10 +139,10 @@ protected void masterOperation( }, listener::onFailure); // Wait for job to be started - ActionListener> waitForJobToStart = + ActionListener> waitForJobToStart = new ActionListener<>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask task) { waitForJobStarted(task.getId(), jobParams, clearJobFinishTime); } @@ -254,7 +254,7 @@ private void waitForJobStarted(String taskId, OpenJobAction.JobParams jobParams, jobParams.getTimeout(), new PersistentTasksService.WaitForPersistentTaskListener() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask persistentTask) { if (predicate.exception != null) { if (predicate.shouldCancel) { // We want to return to the caller without leaving an unassigned persistent task, to match @@ -321,13 +321,13 @@ private void clearJobFinishedTime( } private void cancelJobStart( - PersistentTasksCustomMetadata.PersistentTask persistentTask, + 
PersistentTasksMetadataSection.PersistentTask persistentTask, Exception exception, ActionListener listener ) { persistentTasksService.sendRemoveRequest(persistentTask.getId(), null, new ActionListener<>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask task) { // We succeeded in cancelling the persistent task, but the // problem that caused us to cancel it is the overall result listener.onFailure(exception); @@ -356,14 +356,14 @@ public void onFailure(Exception e) { * Important: the methods of this class must NOT throw exceptions. If they did then the callers * of endpoints waiting for a condition tested by this predicate would never get a response. */ - private static class JobPredicate implements Predicate> { + private static class JobPredicate implements Predicate> { private volatile Exception exception; private volatile String node = ""; private volatile boolean shouldCancel; @Override - public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public boolean test(PersistentTasksMetadataSection.PersistentTask persistentTask) { JobState jobState = JobState.CLOSED; String reason = null; if (persistentTask != null) { @@ -371,7 +371,7 @@ public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTa jobState = jobTaskState == null ? JobState.OPENING : jobTaskState.getState(); reason = jobTaskState == null ? null : jobTaskState.getReason(); - PersistentTasksCustomMetadata.Assignment assignment = persistentTask.getAssignment(); + PersistentTasksMetadataSection.Assignment assignment = persistentTask.getAssignment(); // This means we are awaiting a new node to be spun up, ok to return back to the user to await node creation if (assignment != null && assignment.equals(JobNodeSelector.AWAITING_LAZY_ASSIGNMENT)) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java index d38c923515203..736dc73558bee 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportPutTrainedModelAliasAction.java @@ -276,7 +276,7 @@ static ClusterState updateModelAlias(final ClusterState currentState, final PutT } newMetadata.put(request.getModelAlias(), new ModelAliasMetadata.ModelAliasEntry(request.getModelId())); final ModelAliasMetadata modelAliasMetadata = new ModelAliasMetadata(newMetadata); - builder.metadata(Metadata.builder(currentState.getMetadata()).putCustom(ModelAliasMetadata.NAME, modelAliasMetadata).build()); + builder.metadata(Metadata.builder(currentState.getMetadata()).putSection(ModelAliasMetadata.NAME, modelAliasMetadata).build()); return builder.build(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java index 13de0ad51ff1c..367979741766f 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportResetJobAction.java @@ -27,7 +27,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.core.CheckedConsumer; import 
org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -106,7 +106,7 @@ protected void masterOperation( ActionListener jobListener = ActionListener.wrap(jobBuilder -> { Job job = jobBuilder.build(); - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); JobState jobState = MlTasks.getJobState(job.getId(), tasks); if (request.isSkipJobStateValidation() == false && jobState != JobState.CLOSED) { listener.onFailure(ExceptionsHelper.conflictStatusException(Messages.getMessage(Messages.REST_JOB_NOT_CLOSED_RESET))); @@ -212,7 +212,7 @@ private void resetJob( // Now that we have updated the job's block reason, we should check again // if the job has been opened. - PersistentTasksCustomMetadata tasks = clusterService.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterService.state().getMetadata().section(PersistentTasksMetadataSection.TYPE); JobState jobState = MlTasks.getJobState(jobId, tasks); if (request.isSkipJobStateValidation() == false && jobState != JobState.CLOSED) { jobConfigProvider.updateJobBlockReason( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java index 0cc82198f09d4..8105b95377154 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportRevertModelSnapshotAction.java @@ -22,7 +22,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -112,7 +112,7 @@ protected void masterOperation( // 5. 
Revert the state ActionListener annotationsIndexUpdateListener = ActionListener.wrap(r -> { ActionListener jobListener = ActionListener.wrap(job -> { - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); JobState jobState = MlTasks.getJobState(job.getId(), tasks); if (request.isForce() == false && jobState.equals(JobState.CLOSED) == false) { listener.onFailure(ExceptionsHelper.conflictStatusException(Messages.getMessage(Messages.REST_JOB_NOT_CLOSED_REVERT))); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java index 117e534490f9a..f36b95969d377 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetResetModeAction.java @@ -49,12 +49,12 @@ protected ClusterState setState(ClusterState oldState, SetResetModeActionRequest if (request.shouldDeleteMetadata()) { assert request.isEnabled() == false; // SetResetModeActionRequest should have enforced this newState.metadata( - Metadata.builder(oldState.getMetadata()).removeCustom(MlMetadata.TYPE).removeCustom(ModelAliasMetadata.NAME).build() + Metadata.builder(oldState.getMetadata()).removeSection(MlMetadata.TYPE).removeSection(ModelAliasMetadata.NAME).build() ); } else { - MlMetadata.Builder builder = MlMetadata.Builder.from(oldState.metadata().custom(MlMetadata.TYPE)) + MlMetadata.Builder builder = MlMetadata.Builder.from(oldState.metadata().section(MlMetadata.TYPE)) .isResetMode(request.isEnabled()); - newState.metadata(Metadata.builder(oldState.getMetadata()).putCustom(MlMetadata.TYPE, builder.build()).build()); + newState.metadata(Metadata.builder(oldState.getMetadata()).putSection(MlMetadata.TYPE, builder.build()).build()); } return newState.build(); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java index 744d5dbd6974f..6150cbda403d2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportSetUpgradeModeAction.java @@ -31,8 +31,8 @@ import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.persistent.PersistentTasksClusterService; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; @@ -142,7 +142,7 @@ protected void masterOperation( isRunning.set(false); listener.onFailure(e); }); - final PersistentTasksCustomMetadata tasksCustomMetadata = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); + final PersistentTasksMetadataSection tasksCustomMetadata = state.metadata().section(PersistentTasksMetadataSection.TYPE); // <4> We have unassigned the 
tasks, respond to the listener. ActionListener>> unassignPersistentTasksListener = ActionListener.wrap(unassignedPersistentTasks -> { @@ -249,10 +249,10 @@ protected AcknowledgedResponse newResponse(boolean acknowledged) { @Override public ClusterState execute(ClusterState currentState) throws Exception { logger.trace("Executing cluster state update"); - MlMetadata.Builder builder = new MlMetadata.Builder(currentState.metadata().custom(MlMetadata.TYPE)); + MlMetadata.Builder builder = new MlMetadata.Builder(currentState.metadata().section(MlMetadata.TYPE)); builder.isUpgradeMode(request.isEnabled()); ClusterState.Builder newState = ClusterState.builder(currentState); - newState.metadata(Metadata.builder(currentState.getMetadata()).putCustom(MlMetadata.TYPE, builder.build()).build()); + newState.metadata(Metadata.builder(currentState.getMetadata()).putSection(MlMetadata.TYPE, builder.build()).build()); return newState.build(); } }); @@ -282,7 +282,7 @@ protected ClusterBlockException checkBlock(SetUpgradeModeAction.Request request, * @param listener Alerted when tasks are unassignd */ private void unassignPersistentTasks( - PersistentTasksCustomMetadata tasksCustomMetadata, + PersistentTasksMetadataSection tasksCustomMetadata, ActionListener>> listener ) { List> mlTasks = tasksCustomMetadata.tasks() @@ -321,7 +321,7 @@ private void unassignPersistentTasks( } private void isolateDatafeeds( - PersistentTasksCustomMetadata tasksCustomMetadata, + PersistentTasksMetadataSection tasksCustomMetadata, ActionListener> listener ) { Set datafeedsToIsolate = MlTasks.startedDatafeedIds(tasksCustomMetadata); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java index 9db8a72f0bb14..86e389e5bd53d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsAction.java @@ -38,7 +38,7 @@ import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; @@ -179,10 +179,10 @@ protected void masterOperation( } // Wait for analytics to be started - ActionListener> waitForAnalyticsToStart = new ActionListener< - PersistentTasksCustomMetadata.PersistentTask>() { + ActionListener> waitForAnalyticsToStart = new ActionListener< + PersistentTasksMetadataSection.PersistentTask>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask task) { waitForAnalyticsStarted(task, request.getTimeout(), listener); } @@ -449,7 +449,7 @@ private static void checkDestIndexIsEmptyIfExists( } private void waitForAnalyticsStarted( - PersistentTasksCustomMetadata.PersistentTask task, + PersistentTasksMetadataSection.PersistentTask task, TimeValue timeout, ActionListener listener ) { @@ -462,7 +462,7 @@ private void waitForAnalyticsStarted( new PersistentTasksService.WaitForPersistentTaskListener() { @Override - 
public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask persistentTask) { if (predicate.exception != null) { // We want to return to the caller without leaving an unassigned persistent task, to match // what would have happened if the error had been detected in the "fast fail" validation @@ -530,19 +530,19 @@ private StartContext(DataFrameAnalyticsConfig config, List progre * Important: the methods of this class must NOT throw exceptions. If they did then the callers * of endpoints waiting for a condition tested by this predicate would never get a response. */ - private static class AnalyticsPredicate implements Predicate> { + private static class AnalyticsPredicate implements Predicate> { private volatile Exception exception; private volatile String node = ""; private volatile String assignmentExplanation; @Override - public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public boolean test(PersistentTasksMetadataSection.PersistentTask persistentTask) { if (persistentTask == null) { return false; } - PersistentTasksCustomMetadata.Assignment assignment = persistentTask.getAssignment(); + PersistentTasksMetadataSection.Assignment assignment = persistentTask.getAssignment(); // This means we are awaiting a new node to be spun up, ok to return back to the user to await node creation if (assignment != null && assignment.equals(JobNodeSelector.AWAITING_LAZY_ASSIGNMENT)) { @@ -551,7 +551,7 @@ public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTa String reason = "__unknown__"; if (assignment != null - && assignment.equals(PersistentTasksCustomMetadata.INITIAL_ASSIGNMENT) == false + && assignment.equals(PersistentTasksMetadataSection.INITIAL_ASSIGNMENT) == false && assignment.isAssigned() == false) { assignmentExplanation = assignment.getExplanation(); // Assignment failed due to primary shard check. 
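
The predicate hunks above only rename the nested PersistentTask and Assignment types; the control flow is unchanged. As an illustration of the renamed API (not part of the patch), a minimal predicate of the same shape might look like the sketch below. It uses only members that appear in this diff (getAssignment, isAssigned, getExplanation, INITIAL_ASSIGNMENT); the class and field names are invented for the example.

    import java.util.function.Predicate;

    import org.elasticsearch.persistent.PersistentTasksMetadataSection;
    import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment;
    import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask;

    // Simplified assignment check in the style of AnalyticsPredicate/DatafeedPredicate:
    // it must never throw, returns true once the task is assigned (or needs no check),
    // and records the explanation when assignment has failed.
    final class AssignmentCheckingPredicate implements Predicate<PersistentTask<?>> {

        volatile String lastExplanation;

        @Override
        public boolean test(PersistentTask<?> task) {
            if (task == null) {
                return false;   // task not created yet, keep waiting
            }
            Assignment assignment = task.getAssignment();
            if (assignment == null || assignment.isAssigned()) {
                return true;    // assigned, nothing further to wait for
            }
            if (assignment.equals(PersistentTasksMetadataSection.INITIAL_ASSIGNMENT) == false) {
                lastExplanation = assignment.getExplanation();  // failed assignment, remember why
            }
            return false;
        }
    }
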
@@ -597,16 +597,16 @@ public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTa } private void cancelAnalyticsStart( - PersistentTasksCustomMetadata.PersistentTask persistentTask, + PersistentTasksMetadataSection.PersistentTask persistentTask, Exception exception, ActionListener listener ) { persistentTasksService.sendRemoveRequest( persistentTask.getId(), null, - new ActionListener>() { + new ActionListener>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask task) { // We succeeded in cancelling the persistent task, but the // problem that caused us to cancel it is the overall result listener.onFailure(exception); @@ -668,7 +668,7 @@ protected AllocatedPersistentTask createTask( String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask persistentTask, + PersistentTasksMetadataSection.PersistentTask persistentTask, Map headers ) { return new DataFrameAnalyticsTask( @@ -686,13 +686,13 @@ protected AllocatedPersistentTask createTask( } @Override - public PersistentTasksCustomMetadata.Assignment getAssignment( + public PersistentTasksMetadataSection.Assignment getAssignment( TaskParams params, Collection candidateNodes, @SuppressWarnings("HiddenField") ClusterState clusterState ) { boolean isMemoryTrackerRecentlyRefreshed = memoryTracker.isRecentlyRefreshed(); - Optional optionalAssignment = getPotentialAssignment( + Optional optionalAssignment = getPotentialAssignment( params, clusterState, isMemoryTrackerRecentlyRefreshed @@ -712,7 +712,7 @@ public PersistentTasksCustomMetadata.Assignment getAssignment( ); // Pass an effectively infinite value for max concurrent opening jobs, because data frame analytics jobs do // not have an "opening" state so would never be rejected for causing too many jobs in the "opening" state - PersistentTasksCustomMetadata.Assignment assignment = jobNodeSelector.selectNode( + PersistentTasksMetadataSection.Assignment assignment = jobNodeSelector.selectNode( maxOpenJobs, Integer.MAX_VALUE, maxMachineMemoryPercent, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java index 26d26d87e4cc7..3408e1cb005e6 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedAction.java @@ -35,8 +35,8 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; @@ -144,7 +144,7 @@ public TransportStartDatafeedAction( static void validate( Job job, DatafeedConfig datafeedConfig, - PersistentTasksCustomMetadata tasks, + PersistentTasksMetadataSection tasks, NamedXContentRegistry xContentRegistry ) { DatafeedJobValidator.validate(datafeedConfig, job, xContentRegistry); @@ -199,12 +199,12 @@ protected void masterOperation( ); AtomicReference datafeedConfigHolder = new 
AtomicReference<>(); - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); - ActionListener> waitForTaskListener = + ActionListener> waitForTaskListener = new ActionListener<>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask persistentTask) { waitForDatafeedStarted(persistentTask.getId(), params, responseHeaderPreservingListener); } @@ -331,7 +331,7 @@ private void createDataExtractor( Job job, DatafeedConfig datafeed, StartDatafeedAction.DatafeedParams params, - ActionListener> listener + ActionListener> listener ) { DataExtractorFactory.create( new ParentTaskAssigningClient(client, clusterService.localNode(), task), @@ -373,7 +373,7 @@ private void waitForDatafeedStarted( params.getTimeout(), new PersistentTasksService.WaitForPersistentTaskListener() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask persistentTask) { if (predicate.exception != null) { // We want to return to the caller without leaving an unassigned persistent task, to match // what would have happened if the error had been detected in the "fast fail" validation @@ -404,13 +404,13 @@ public void onTimeout(TimeValue timeout) { } private void cancelDatafeedStart( - PersistentTasksCustomMetadata.PersistentTask persistentTask, + PersistentTasksMetadataSection.PersistentTask persistentTask, Exception exception, ActionListener listener ) { persistentTasksService.sendRemoveRequest(persistentTask.getId(), null, new ActionListener<>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask task) { // We succeeded in cancelling the persistent task, but the // problem that caused us to cancel it is the overall result listener.onFailure(exception); @@ -485,7 +485,7 @@ public StartDatafeedPersistentTasksExecutor( } @Override - public PersistentTasksCustomMetadata.Assignment getAssignment( + public PersistentTasksMetadataSection.Assignment getAssignment( StartDatafeedAction.DatafeedParams params, Collection candidateNodes, ClusterState clusterState @@ -547,7 +547,7 @@ protected AllocatedPersistentTask createTask( String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask persistentTask, + PersistentTasksMetadataSection.PersistentTask persistentTask, Map headers ) { return new DatafeedTask(id, type, action, parentTaskId, persistentTask.getParams(), headers); @@ -709,17 +709,17 @@ public GetDatafeedRunningStateAction.Response.RunningState getRunningState() { * Important: the methods of this class must NOT throw exceptions. If they did then the callers * of endpoints waiting for a condition tested by this predicate would never get a response. 
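
The cancelAnalyticsStart and cancelDatafeedStart hunks above follow the same "cancel on failure" pattern: remove the persistent task that was just created, then report the original error. A minimal sketch of that pattern against the renamed types follows; it is illustrative only, the helper name and generic response type are not in the patch, and the second argument to sendRemoveRequest is passed as null exactly as the hunks do.

    import org.elasticsearch.action.ActionListener;
    import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask;
    import org.elasticsearch.persistent.PersistentTasksService;

    final class StartCancellation {

        // Remove the just-created persistent task so no unassigned task is left behind,
        // then surface the original failure to the caller whatever the removal outcome.
        static <T> void cancelStart(
            PersistentTasksService persistentTasksService,
            PersistentTask<?> persistentTask,
            Exception cause,
            ActionListener<T> caller
        ) {
            persistentTasksService.sendRemoveRequest(persistentTask.getId(), null, new ActionListener<PersistentTask<?>>() {
                @Override
                public void onResponse(PersistentTask<?> removed) {
                    caller.onFailure(cause);   // removal succeeded, but the operation still failed
                }

                @Override
                public void onFailure(Exception e) {
                    caller.onFailure(cause);   // removal failed too; still report the original problem
                }
            });
        }
    }
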
*/ - private static class DatafeedPredicate implements Predicate> { + private static class DatafeedPredicate implements Predicate> { private volatile Exception exception; private volatile String node = ""; @Override - public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public boolean test(PersistentTasksMetadataSection.PersistentTask persistentTask) { if (persistentTask == null) { return false; } - PersistentTasksCustomMetadata.Assignment assignment = persistentTask.getAssignment(); + PersistentTasksMetadataSection.Assignment assignment = persistentTask.getAssignment(); if (assignment != null) { // This means we are awaiting the datafeed's job to be assigned to a node if (assignment.equals(DatafeedNodeSelector.AWAITING_JOB_ASSIGNMENT)) { @@ -729,7 +729,7 @@ public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTa if (assignment.equals(DatafeedNodeSelector.AWAITING_JOB_RELOCATION)) { return true; } - if (assignment.equals(PersistentTasksCustomMetadata.INITIAL_ASSIGNMENT) == false && assignment.isAssigned() == false) { + if (assignment.equals(PersistentTasksMetadataSection.INITIAL_ASSIGNMENT) == false && assignment.isAssigned() == false) { // Assignment has failed despite passing our "fast fail" validation exception = new ElasticsearchStatusException( "Could not start datafeed, allocation explanation [" + assignment.getExplanation() + "]", diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java index e130b13f4ec30..cc7f09b25de20 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStartTrainedModelDeploymentAction.java @@ -36,7 +36,7 @@ import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.sort.SortBuilders; @@ -215,7 +215,9 @@ protected void masterOperation( perDeploymentMemoryBytes.get(), perAllocationMemoryBytes.get() ); - PersistentTasksCustomMetadata persistentTasks = clusterService.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = clusterService.state() + .getMetadata() + .section(PersistentTasksMetadataSection.TYPE); memoryTracker.refresh( persistentTasks, ActionListener.wrap( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java index 7d39cd7f76e17..45f73bf3aa8bc 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsAction.java @@ -25,7 +25,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.injection.guice.Inject; -import 
org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -118,7 +118,7 @@ protected void doExecute( ActionListener> expandedIdsListener = ActionListener.wrap(idsToStop -> { logger.debug("Resolved data frame analytics to stop: {}", idsToStop); - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); AnalyticsByTaskState analyticsByTaskState = AnalyticsByTaskState.build(idsToStop, tasks); if (analyticsByTaskState.isEmpty()) { @@ -165,7 +165,7 @@ private void findIdsToStop( } private static Set getAllStartedIds(ClusterState clusterState) { - PersistentTasksCustomMetadata tasksMetadata = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasksMetadata = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); return tasksMetadata == null ? Collections.emptySet() : tasksMetadata.tasks() @@ -204,7 +204,7 @@ private void normalStop( Task task, StopDataFrameAnalyticsAction.Request request, ActionListener listener, - PersistentTasksCustomMetadata tasks, + PersistentTasksMetadataSection tasks, AnalyticsByTaskState analyticsByTaskState ) { if (analyticsByTaskState.failed.isEmpty() == false) { @@ -249,7 +249,7 @@ private void normalStop( private void forceStop( StopDataFrameAnalyticsAction.Request request, ActionListener listener, - PersistentTasksCustomMetadata tasks, + PersistentTasksMetadataSection tasks, List nonStoppedAnalytics ) { @@ -257,7 +257,7 @@ private void forceStop( final AtomicArray failures = new AtomicArray<>(nonStoppedAnalytics.size()); for (String analyticsId : nonStoppedAnalytics) { - PersistentTasksCustomMetadata.PersistentTask analyticsTask = MlTasks.getDataFrameAnalyticsTask(analyticsId, tasks); + PersistentTasksMetadataSection.PersistentTask analyticsTask = MlTasks.getDataFrameAnalyticsTask(analyticsId, tasks); if (analyticsTask != null) { persistentTasksService.sendRemoveRequest(analyticsTask.getId(), null, ActionListener.wrap(removedTask -> { auditor.info(analyticsId, Messages.DATA_FRAME_ANALYTICS_AUDIT_FORCE_STOPPED); @@ -314,10 +314,10 @@ private static void sendResponseOrFailure( listener.onFailure(e); } - private String[] findAllocatedNodesAndRemoveUnassignedTasks(List analyticsIds, PersistentTasksCustomMetadata tasks) { + private String[] findAllocatedNodesAndRemoveUnassignedTasks(List analyticsIds, PersistentTasksMetadataSection tasks) { List nodes = new ArrayList<>(); for (String analyticsId : analyticsIds) { - PersistentTasksCustomMetadata.PersistentTask task = MlTasks.getDataFrameAnalyticsTask(analyticsId, tasks); + PersistentTasksMetadataSection.PersistentTask task = MlTasks.getDataFrameAnalyticsTask(analyticsId, tasks); if (task == null) { // This should not be possible; we filtered started analytics thus the task should exist String msg = "Requested data frame analytics [" + analyticsId + "] be stopped but the task could not be found"; @@ -455,7 +455,7 @@ List getNonStopped() { return nonStopped; } - static AnalyticsByTaskState build(Set analyticsIds, PersistentTasksCustomMetadata tasks) { + static AnalyticsByTaskState build(Set analyticsIds, PersistentTasksMetadataSection tasks) { List started = new ArrayList<>(); List stopping 
= new ArrayList<>(); List failed = new ArrayList<>(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java index f998701dbd4e0..12d1b0d54c140 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedAction.java @@ -29,7 +29,7 @@ import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.persistent.PersistentTasksClusterService; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; @@ -115,7 +115,7 @@ public TransportStopDatafeedAction( */ static void sortDatafeedIdsByTaskState( Collection expandedDatafeedIds, - PersistentTasksCustomMetadata tasks, + PersistentTasksMetadataSection tasks, List startedDatafeedIds, List stoppingDatafeedIds, List notStoppedDatafeedIds @@ -189,7 +189,7 @@ private void doExecute( ); } } else { - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); datafeedConfigProvider.expandDatafeedIds( request.getDatafeedId(), request.allowNoMatch(), @@ -220,7 +220,7 @@ private void normalStopDatafeed( Task task, StopDatafeedAction.Request request, ActionListener listener, - PersistentTasksCustomMetadata tasks, + PersistentTasksMetadataSection tasks, DiscoveryNodes nodes, List startedDatafeeds, List stoppingDatafeeds, @@ -229,9 +229,9 @@ private void normalStopDatafeed( final Set executorNodes = new HashSet<>(); final List startedDatafeedsJobs = new ArrayList<>(); final List resolvedStartedDatafeeds = new ArrayList<>(); - final List> allDataFeedsToWaitFor = new ArrayList<>(); + final List> allDataFeedsToWaitFor = new ArrayList<>(); for (String datafeedId : startedDatafeeds) { - PersistentTasksCustomMetadata.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); + PersistentTasksMetadataSection.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); if (datafeedTask == null) { // This should not happen, because startedDatafeeds was derived from the same tasks that is passed to this method String msg = "Requested datafeed [" + datafeedId + "] be stopped, but datafeed's task could not be found."; @@ -262,7 +262,7 @@ private void normalStopDatafeed( } for (String datafeedId : stoppingDatafeeds) { - PersistentTasksCustomMetadata.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); + PersistentTasksMetadataSection.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); assert datafeedTask != null : "Requested datafeed [" + datafeedId + "] be stopped, but datafeed's task could not be found."; allDataFeedsToWaitFor.add(datafeedTask); } @@ -275,7 +275,7 @@ private void normalStopDatafeed( ActionListener finalListener = ActionListener.wrap( response -> waitForDatafeedStopped(allDataFeedsToWaitFor, request, response, ActionListener.wrap(finished -> { for (String datafeedId : movedDatafeeds) { - PersistentTasksCustomMetadata.PersistentTask datafeedTask = 
MlTasks.getDatafeedTask(datafeedId, tasks); + PersistentTasksMetadataSection.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); persistentTasksService.sendRemoveRequest( datafeedTask.getId(), null, @@ -362,9 +362,9 @@ private void normalStopDatafeed( super.doExecute(task, request, finalListener); } - private void auditDatafeedStopped(PersistentTasksCustomMetadata.PersistentTask datafeedTask) { + private void auditDatafeedStopped(PersistentTasksMetadataSection.PersistentTask datafeedTask) { @SuppressWarnings("unchecked") - String jobId = ((PersistentTasksCustomMetadata.PersistentTask) datafeedTask).getParams() + String jobId = ((PersistentTasksMetadataSection.PersistentTask) datafeedTask).getParams() .getJobId(); auditor.info(jobId, Messages.getMessage(Messages.JOB_AUDIT_DATAFEED_STOPPED)); } @@ -372,7 +372,7 @@ private void auditDatafeedStopped(PersistentTasksCustomMetadata.PersistentTask listener, - PersistentTasksCustomMetadata tasks, + PersistentTasksMetadataSection tasks, DiscoveryNodes nodes, final List notStoppedDatafeeds ) { @@ -380,7 +380,7 @@ private void forceStopDatafeed( final AtomicArray failures = new AtomicArray<>(notStoppedDatafeeds.size()); for (String datafeedId : notStoppedDatafeeds) { - PersistentTasksCustomMetadata.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); + PersistentTasksMetadataSection.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); if (datafeedTask != null) { persistentTasksService.sendRemoveRequest(datafeedTask.getId(), null, ActionListener.wrap(persistentTask -> { // For force stop, only audit here if the datafeed was unassigned at the time of the stop, hence inactive. @@ -491,16 +491,16 @@ private static void sendResponseOrFailure( * have been sent to the wrong node and ignored there, so we'll just spin until the timeout expires. 
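
The waitForDatafeedStopped hunk below hands a condition over the whole persistent-tasks section to PersistentTasksService#waitForPersistentTasksCondition. A simplified sketch of such a condition, using only the getTask and getId accessors that appear in this diff, is shown here; the condition in the patch does more (the visible hunk also threads through a set of moved datafeeds), so treat this as an illustration of the renamed section type rather than the exact logic.

    import java.util.List;
    import java.util.function.Predicate;

    import org.elasticsearch.persistent.PersistentTasksMetadataSection;
    import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask;

    final class StoppedTasksCondition {

        // True once none of the originally observed tasks is still registered in the
        // (possibly absent) persistent-tasks section of the cluster state.
        static Predicate<PersistentTasksMetadataSection> allRemoved(List<PersistentTask<?>> originalTasks) {
            return section -> {
                if (section == null) {
                    return true;    // no section at all means no persistent tasks are left
                }
                for (PersistentTask<?> original : originalTasks) {
                    if (section.getTask(original.getId()) != null) {
                        return false;    // this task is still registered, keep waiting
                    }
                }
                return true;
            };
        }
    }
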
*/ void waitForDatafeedStopped( - List> datafeedPersistentTasks, + List> datafeedPersistentTasks, StopDatafeedAction.Request request, StopDatafeedAction.Response response, ActionListener listener, Set movedDatafeeds ) { persistentTasksService.waitForPersistentTasksCondition(persistentTasksCustomMetadata -> { - for (PersistentTasksCustomMetadata.PersistentTask originalPersistentTask : datafeedPersistentTasks) { + for (PersistentTasksMetadataSection.PersistentTask originalPersistentTask : datafeedPersistentTasks) { String originalPersistentTaskId = originalPersistentTask.getId(); - PersistentTasksCustomMetadata.PersistentTask currentPersistentTask = persistentTasksCustomMetadata.getTask( + PersistentTasksMetadataSection.PersistentTask currentPersistentTask = persistentTasksCustomMetadata.getTask( originalPersistentTaskId ); if (currentPersistentTask != null) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java index cbfa770f97e0f..9f3559c5b949a 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportStopTrainedModelDeploymentAction.java @@ -122,7 +122,7 @@ protected void doExecute( return; } - IngestMetadata currentIngestMetadata = state.metadata().custom(IngestMetadata.TYPE); + IngestMetadata currentIngestMetadata = state.metadata().section(IngestMetadata.TYPE); Set referencedModels = InferenceProcessorInfoExtractor.getModelIdsFromInferenceProcessors(currentIngestMetadata); if (request.isForce() == false) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java index 43b2f22ae79f0..a25db4f02ca95 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/action/TransportUpgradeJobModelSnapshotAction.java @@ -25,8 +25,8 @@ import org.elasticsearch.injection.guice.Inject; import org.elasticsearch.license.LicenseUtils; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; @@ -123,7 +123,7 @@ protected void masterOperation(Task task, Request request, ClusterState state, A return; } - PersistentTasksCustomMetadata customMetadata = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection customMetadata = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); if (customMetadata != null && (customMetadata.findTasks( MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java index 
cca59f27d5c76..8561346d6bc21 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingContext.java @@ -9,7 +9,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; import org.elasticsearch.xpack.core.ml.action.StartDataFrameAnalyticsAction; @@ -34,9 +34,9 @@ class MlAutoscalingContext { - final Collection> anomalyDetectionTasks; - final Collection> snapshotUpgradeTasks; - final Collection> dataframeAnalyticsTasks; + final Collection> anomalyDetectionTasks; + final Collection> snapshotUpgradeTasks; + final Collection> dataframeAnalyticsTasks; final Map modelAssignments; final List waitingAnomalyJobs; @@ -45,19 +45,19 @@ class MlAutoscalingContext { final List waitingAllocatedModels; final List mlNodes; - final PersistentTasksCustomMetadata persistentTasks; + final PersistentTasksMetadataSection persistentTasks; MlAutoscalingContext() { this(List.of(), List.of(), List.of(), Map.of(), List.of(), null); } MlAutoscalingContext( - final Collection> anomalyDetectionTasks, - final Collection> snapshotUpgradeTasks, - final Collection> dataframeAnalyticsTasks, + final Collection> anomalyDetectionTasks, + final Collection> snapshotUpgradeTasks, + final Collection> dataframeAnalyticsTasks, final Map modelAssignments, final List mlNodes, - final PersistentTasksCustomMetadata persistentTasks + final PersistentTasksMetadataSection persistentTasks ) { this.anomalyDetectionTasks = anomalyDetectionTasks; this.snapshotUpgradeTasks = snapshotUpgradeTasks; @@ -73,7 +73,7 @@ class MlAutoscalingContext { } MlAutoscalingContext(ClusterState clusterState) { - persistentTasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + persistentTasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); anomalyDetectionTasks = anomalyDetectionTasks(persistentTasks); snapshotUpgradeTasks = snapshotUpgradeTasks(persistentTasks); @@ -98,7 +98,7 @@ private static List getWaitingAllocatedModels(Map getWaitingAnalyticsJobs( - Collection> dataframeAnalyticsTasks + Collection> dataframeAnalyticsTasks ) { return dataframeAnalyticsTasks.stream() .filter(t -> AWAITING_LAZY_ASSIGNMENT.equals(t.getAssignment())) @@ -107,7 +107,7 @@ private static List getWaitingAnalyticsJobs( } private static List getWaitingSnapshotUpgrades( - Collection> snapshotUpgradeTasks + Collection> snapshotUpgradeTasks ) { return snapshotUpgradeTasks.stream() .filter(t -> AWAITING_LAZY_ASSIGNMENT.equals(t.getAssignment())) @@ -115,15 +115,15 @@ private static List getWaitingSnapshotUpgrades( .toList(); } - private static List waitingAnomalyJobs(Collection> anomalyDetectionTasks) { + private static List waitingAnomalyJobs(Collection> anomalyDetectionTasks) { return anomalyDetectionTasks.stream() .filter(t -> AWAITING_LAZY_ASSIGNMENT.equals(t.getAssignment())) .map(t -> ((OpenJobAction.JobParams) t.getParams()).getJobId()) .toList(); } - private static Collection> anomalyDetectionTasks( - PersistentTasksCustomMetadata tasksCustomMetadata + private static Collection> anomalyDetectionTasks( + PersistentTasksMetadataSection tasksCustomMetadata ) { if (tasksCustomMetadata == null) { return 
List.of(); @@ -132,8 +132,8 @@ private static Collection> anoma return tasksCustomMetadata.findTasks(MlTasks.JOB_TASK_NAME, t -> taskStateFilter(getJobStateModifiedForReassignments(t))); } - private static Collection> snapshotUpgradeTasks( - PersistentTasksCustomMetadata tasksCustomMetadata + private static Collection> snapshotUpgradeTasks( + PersistentTasksMetadataSection tasksCustomMetadata ) { if (tasksCustomMetadata == null) { return List.of(); @@ -142,8 +142,8 @@ private static Collection> snaps return tasksCustomMetadata.findTasks(MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, t -> taskStateFilter(getSnapshotUpgradeState(t))); } - static Collection> dataframeAnalyticsTasks( - PersistentTasksCustomMetadata tasksCustomMetadata + static Collection> dataframeAnalyticsTasks( + PersistentTasksMetadataSection tasksCustomMetadata ) { if (tasksCustomMetadata == null) { return List.of(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java index dfe0e557f749d..2c3de1ba23bc1 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDecider.java @@ -20,7 +20,7 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.logging.LogManager; import org.elasticsearch.logging.Logger; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingDeciderContext; import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -354,7 +354,7 @@ private MlMemoryAutoscalingCapacity refreshMemoryTrackerAndBuildEmptyDecision(St private long maxMemoryBytes(MlAutoscalingContext mlContext) { long maxMemoryBytes = Math.max( mlContext.anomalyDetectionTasks.stream() - .filter(PersistentTasksCustomMetadata.PersistentTask::isAssigned) + .filter(PersistentTasksMetadataSection.PersistentTask::isAssigned) // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used .mapToLong(t -> { Long mem = getAnomalyMemoryRequirement(t); @@ -367,7 +367,7 @@ private long maxMemoryBytes(MlAutoscalingContext mlContext) { .max() .orElse(0L), mlContext.snapshotUpgradeTasks.stream() - .filter(PersistentTasksCustomMetadata.PersistentTask::isAssigned) + .filter(PersistentTasksMetadataSection.PersistentTask::isAssigned) // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used .mapToLong(t -> { Long mem = getAnomalyMemoryRequirement(t); @@ -383,7 +383,7 @@ private long maxMemoryBytes(MlAutoscalingContext mlContext) { maxMemoryBytes = Math.max( maxMemoryBytes, mlContext.dataframeAnalyticsTasks.stream() - .filter(PersistentTasksCustomMetadata.PersistentTask::isAssigned) + .filter(PersistentTasksMetadataSection.PersistentTask::isAssigned) // Memory SHOULD be recently refreshed, so in our current state, we should at least have an idea of the memory used .mapToLong(t -> { Long mem = this.getAnalyticsMemoryRequirement(t); @@ -851,7 +851,7 @@ static boolean modelAssignmentsRequireMoreThanHalfCpu( */ Optional calculateFutureAvailableCapacity(Collection mlNodes, ClusterState clusterState) { return calculateFutureAvailableCapacity( - 
clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE), + clusterState.metadata().section(PersistentTasksMetadataSection.TYPE), mlNodes.stream() .map(node -> nodeLoadDetector.detectNodeLoad(clusterState, node, maxOpenJobs, maxMachineMemoryPercent, useAuto)) .toList() @@ -868,10 +868,10 @@ Optional calculateFutureAvailableCapacity(Collection 1 "batch" ml tasks are running on the same node, we sum their resources. */ - Optional calculateFutureAvailableCapacity(PersistentTasksCustomMetadata tasks, List nodeLoads) { - final List> jobsWithLookbackDatafeeds = + Optional calculateFutureAvailableCapacity(PersistentTasksMetadataSection tasks, List nodeLoads) { + final List> jobsWithLookbackDatafeeds = datafeedTasks(tasks).stream().filter(t -> t.getParams().getEndTime() != null && t.getExecutorNode() != null).toList(); - final List> assignedAnalyticsJobs = MlAutoscalingContext.dataframeAnalyticsTasks( + final List> assignedAnalyticsJobs = MlAutoscalingContext.dataframeAnalyticsTasks( tasks ).stream().filter(t -> t.getExecutorNode() != null).toList(); @@ -884,14 +884,14 @@ Optional calculateFutureAvailableCapacity(PersistentTasksC } freeMemoryByNodeId.put(nodeLoad.getNodeId(), nodeLoad.getFreeMemoryExcludingPerNodeOverhead()); } - for (PersistentTasksCustomMetadata.PersistentTask lookbackOnlyDf : jobsWithLookbackDatafeeds) { + for (PersistentTasksMetadataSection.PersistentTask lookbackOnlyDf : jobsWithLookbackDatafeeds) { Long jobSize = getAnomalyMemoryRequirement(lookbackOnlyDf.getParams().getJobId()); if (jobSize == null) { return Optional.empty(); } freeMemoryByNodeId.compute(lookbackOnlyDf.getExecutorNode(), (k, v) -> v == null ? jobSize : jobSize + v); } - for (PersistentTasksCustomMetadata.PersistentTask task : assignedAnalyticsJobs) { + for (PersistentTasksMetadataSection.PersistentTask task : assignedAnalyticsJobs) { Long jobSize = getAnalyticsMemoryRequirement(MlTasks.dataFrameAnalyticsId(task.getId())); if (jobSize == null) { return Optional.empty(); @@ -907,8 +907,8 @@ Optional calculateFutureAvailableCapacity(PersistentTasksC } @SuppressWarnings("unchecked") - private static Collection> datafeedTasks( - PersistentTasksCustomMetadata tasksCustomMetadata + private static Collection> datafeedTasks( + PersistentTasksMetadataSection tasksCustomMetadata ) { if (tasksCustomMetadata == null) { return List.of(); @@ -916,7 +916,7 @@ private static Collection (PersistentTasksCustomMetadata.PersistentTask) p) + .map(p -> (PersistentTasksMetadataSection.PersistentTask) p) .toList(); } @@ -936,7 +936,7 @@ private Long getAllocatedModelRequirement(String modelId) { return mem; } - private Long getAnalyticsMemoryRequirement(PersistentTasksCustomMetadata.PersistentTask task) { + private Long getAnalyticsMemoryRequirement(PersistentTasksMetadataSection.PersistentTask task) { return getAnalyticsMemoryRequirement(MlTasks.dataFrameAnalyticsId(task.getId())); } @@ -948,7 +948,7 @@ private Long getAnomalyMemoryRequirement(String anomalyId) { return mem; } - private Long getAnomalyMemoryRequirement(PersistentTasksCustomMetadata.PersistentTask task) { + private Long getAnomalyMemoryRequirement(PersistentTasksMetadataSection.PersistentTask task) { return getAnomalyMemoryRequirement(MlTasks.jobId(task.getId())); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java index d44d2181f0ce8..27a6757ae845c 100644 --- 
a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedManager.java @@ -20,7 +20,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.license.RemoteClusterLicenseChecker; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -264,8 +264,8 @@ public void deleteDatafeed(DeleteDatafeedAction.Request request, ClusterState st } - private static PersistentTasksCustomMetadata.PersistentTask getDatafeedTask(ClusterState state, String datafeedId) { - PersistentTasksCustomMetadata tasks = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + private static PersistentTasksMetadataSection.PersistentTask getDatafeedTask(ClusterState state, String datafeedId) { + PersistentTasksMetadataSection tasks = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); return MlTasks.getDatafeedTask(datafeedId, tasks); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java index 31add7b37ac5f..b59a100a19f5e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelector.java @@ -17,7 +17,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.core.Nullable; import org.elasticsearch.license.RemoteClusterLicenseChecker; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -38,11 +38,11 @@ public class DatafeedNodeSelector { private static final Logger LOGGER = LogManager.getLogger(DatafeedNodeSelector.class); - public static final PersistentTasksCustomMetadata.Assignment AWAITING_JOB_ASSIGNMENT = new PersistentTasksCustomMetadata.Assignment( + public static final PersistentTasksMetadataSection.Assignment AWAITING_JOB_ASSIGNMENT = new PersistentTasksMetadataSection.Assignment( null, "datafeed awaiting job assignment." ); - public static final PersistentTasksCustomMetadata.Assignment AWAITING_JOB_RELOCATION = new PersistentTasksCustomMetadata.Assignment( + public static final PersistentTasksMetadataSection.Assignment AWAITING_JOB_RELOCATION = new PersistentTasksMetadataSection.Assignment( null, "datafeed awaiting job relocation." 
); @@ -50,7 +50,7 @@ public class DatafeedNodeSelector { private final String datafeedId; private final String jobId; private final List datafeedIndices; - private final PersistentTasksCustomMetadata.PersistentTask jobTask; + private final PersistentTasksMetadataSection.PersistentTask jobTask; private final ClusterState clusterState; private final IndexNameExpressionResolver resolver; private final IndicesOptions indicesOptions; @@ -63,7 +63,7 @@ public DatafeedNodeSelector( List datafeedIndices, IndicesOptions indicesOptions ) { - PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); this.datafeedId = datafeedId; this.jobId = jobId; this.datafeedIndices = datafeedIndices; @@ -105,7 +105,7 @@ public void checkDatafeedTaskCanBeCreated() { * @return The assignment for the datafeed, containing either an executor node or a reason why an * executor node was not returned. */ - public PersistentTasksCustomMetadata.Assignment selectNode(Collection candidateNodes) { + public PersistentTasksMetadataSection.Assignment selectNode(Collection candidateNodes) { if (MlMetadata.getMlMetadata(clusterState).isUpgradeMode()) { return AWAITING_UPGRADE; } @@ -126,11 +126,11 @@ public PersistentTasksCustomMetadata.Assignment selectNode(Collection candidateNode.getId().equals(jobNode)) == false) { return AWAITING_JOB_RELOCATION; } - return new PersistentTasksCustomMetadata.Assignment(jobNode, ""); + return new PersistentTasksMetadataSection.Assignment(jobNode, ""); } LOGGER.debug(assignmentFailure.reason); assert assignmentFailure.reason.isEmpty() == false; - return new PersistentTasksCustomMetadata.Assignment(null, assignmentFailure.reason); + return new PersistentTasksMetadataSection.Assignment(null, assignmentFailure.reason); } @Nullable diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java index 99ad92ee2b91e..f2e823a588bd8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunner.java @@ -19,8 +19,8 @@ import org.elasticsearch.common.util.concurrent.AbstractRunnable; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; @@ -356,12 +356,12 @@ private String getJobIdIfDatafeedRunningOnThisNode(TransportStartDatafeedAction. 
return holder.getJobId(); } - private static JobState getJobState(PersistentTasksCustomMetadata tasks, String jobId) { + private static JobState getJobState(PersistentTasksMetadataSection tasks, String jobId) { return MlTasks.getJobStateModifiedForReassignments(jobId, tasks); } - private boolean jobHasOpenAutodetectCommunicator(PersistentTasksCustomMetadata tasks, String jobId) { - PersistentTasksCustomMetadata.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); + private boolean jobHasOpenAutodetectCommunicator(PersistentTasksMetadataSection tasks, String jobId) { + PersistentTasksMetadataSection.PersistentTask jobTask = MlTasks.getJobTask(jobId, tasks); if (jobTask == null) { return false; } @@ -569,7 +569,7 @@ private long executeRealTime() throws Exception { private void closeJob() { ClusterState clusterState = clusterService.state(); - PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); JobState jobState = MlTasks.getJobState(getJobId(), tasks); if (jobState != JobState.OPENED) { logger.debug("[{}] No need to auto-close job as job state is [{}]", getJobId(), jobState); @@ -635,7 +635,7 @@ private class TaskRunner implements ClusterStateListener { private void runWhenJobIsOpened(TransportStartDatafeedAction.DatafeedTask datafeedTask, String jobId) { ClusterState clusterState = clusterService.state(); - PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); if (getJobState(tasks, jobId) == JobState.OPENED && jobHasOpenAutodetectCommunicator(tasks, jobId)) { runTask(datafeedTask); } else { @@ -667,8 +667,8 @@ public void clusterChanged(ClusterChangedEvent event) { if (tasksToRun.isEmpty() || event.metadataChanged() == false) { return; } - PersistentTasksCustomMetadata previousTasks = event.previousState().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - PersistentTasksCustomMetadata currentTasks = event.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection previousTasks = event.previousState().getMetadata().section(PersistentTasksMetadataSection.TYPE); + PersistentTasksMetadataSection currentTasks = event.state().getMetadata().section(PersistentTasksMetadataSection.TYPE); if (Objects.equals(previousTasks, currentTasks)) { return; } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java index 20da61a3d6910..6dfaaceeb7169 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/datafeed/persistence/DatafeedConfigProvider.java @@ -41,7 +41,7 @@ import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; import org.elasticsearch.index.query.WildcardQueryBuilder; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.tasks.TaskId; @@ -419,7 
+419,7 @@ private void indexUpdatedConfig(DatafeedConfig updatedConfig, long seqNo, long p public void expandDatafeedIds( String expression, boolean allowNoMatch, - PersistentTasksCustomMetadata tasks, + PersistentTasksMetadataSection tasks, boolean allowMissingConfigs, @Nullable TaskId parentTaskId, ActionListener> listener @@ -471,10 +471,10 @@ public void expandDatafeedIds( } /** - * The same logic as {@link #expandDatafeedIds(String, boolean, PersistentTasksCustomMetadata, boolean, TaskId, ActionListener)} but + * The same logic as {@link #expandDatafeedIds(String, boolean, PersistentTasksMetadataSection, boolean, TaskId, ActionListener)} but * the full datafeed configuration is returned. * - * See {@link #expandDatafeedIds(String, boolean, PersistentTasksCustomMetadata, boolean, TaskId, ActionListener)} + * See {@link #expandDatafeedIds(String, boolean, PersistentTasksMetadataSection, boolean, TaskId, ActionListener)} * * @param expression the expression to resolve * @param allowNoMatch if {@code false}, an error is thrown when no name matches the {@code expression}. @@ -569,7 +569,7 @@ private static QueryBuilder buildDatafeedIdQuery(String[] tokens) { return boolQueryBuilder; } - static Collection matchingDatafeedIdsWithTasks(String[] datafeedIdPatterns, PersistentTasksCustomMetadata tasksMetadata) { + static Collection matchingDatafeedIdsWithTasks(String[] datafeedIdPatterns, PersistentTasksMetadataSection tasksMetadata) { return MlStrings.findMatching(datafeedIdPatterns, MlTasks.startedDatafeedIds(tasksMetadata)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java index cb32ca01241a8..6c1a7ce456630 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/DataFrameAnalyticsManager.java @@ -23,7 +23,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.ml.MlStatsIndex; @@ -342,7 +342,7 @@ public Optional getMemoryLimitIfKnown(String id) { * @param tasks Persistent tasks metadata. * @return Memory used by data frame analytics jobs that are active on the current node. 
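
The hunk that follows only changes the parameter type of getActiveTaskMemoryUsage, and the visible body stops after the per-job state lookup. For orientation, a sketch of the general shape is below. The STOPPED filter, the summing, and the memoryLimitById stand-in are assumptions made for illustration; only the iteration, the MlTasks.getDataFrameAnalyticsState lookup, and the ByteSizeValue return type come from the diff.

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    import org.elasticsearch.common.unit.ByteSizeValue;
    import org.elasticsearch.persistent.PersistentTasksMetadataSection;
    import org.elasticsearch.xpack.core.ml.MlTasks;
    import org.elasticsearch.xpack.core.ml.dataframe.DataFrameAnalyticsState;

    final class ActiveAnalyticsMemory {

        // Hypothetical stand-in for the manager's per-job memory bookkeeping.
        private final Map<String, ByteSizeValue> memoryLimitById = new ConcurrentHashMap<>();

        // Resolve each tracked job's task state from the persistent-tasks section and
        // sum the limits of jobs that are not stopped (assumed condition).
        ByteSizeValue activeTaskMemoryUsage(PersistentTasksMetadataSection tasks) {
            long memoryUsedBytes = 0;
            for (Map.Entry<String, ByteSizeValue> entry : memoryLimitById.entrySet()) {
                DataFrameAnalyticsState state = MlTasks.getDataFrameAnalyticsState(entry.getKey(), tasks);
                if (state != DataFrameAnalyticsState.STOPPED) {
                    memoryUsedBytes += entry.getValue().getBytes();
                }
            }
            return ByteSizeValue.ofBytes(memoryUsedBytes);
        }
    }
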
*/ - public ByteSizeValue getActiveTaskMemoryUsage(PersistentTasksCustomMetadata tasks) { + public ByteSizeValue getActiveTaskMemoryUsage(PersistentTasksMetadataSection tasks) { long memoryUsedBytes = 0; for (Map.Entry entry : memoryLimitById.entrySet()) { DataFrameAnalyticsState state = MlTasks.getDataFrameAnalyticsState(entry.getKey(), tasks); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java index 47071c80b90ee..a6ef576c783f2 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/dataframe/persistence/DataFrameAnalyticsConfigProvider.java @@ -35,7 +35,7 @@ import org.elasticsearch.index.engine.VersionConflictEngineException; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -205,7 +205,7 @@ private static void checkUpdateCanBeApplied( ClusterState clusterState ) { String analyticsId = update.getId(); - PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); DataFrameAnalyticsState analyticsState = MlTasks.getDataFrameAnalyticsState(analyticsId, tasks); if (DataFrameAnalyticsState.STOPPED.equals(analyticsState)) { // Analytics is stopped, therefore it is safe to proceed with the udpate diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java index 96490716c5c6c..10343bf38e698 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterService.java @@ -34,7 +34,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.SuppressForbidden; import org.elasticsearch.gateway.GatewayService; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MachineLearningField; @@ -525,10 +525,10 @@ private static ClusterState forceUpdate(ClusterState currentState, TrainedModelA logger.debug(() -> format("updated assignments: %s", modelAssignments.build())); Metadata.Builder metadata = Metadata.builder(currentState.metadata()); if (currentState.getMinTransportVersion().onOrAfter(RENAME_ALLOCATION_TO_ASSIGNMENT_TRANSPORT_VERSION)) { - metadata.putCustom(TrainedModelAssignmentMetadata.NAME, modelAssignments.build()) - .removeCustom(TrainedModelAssignmentMetadata.DEPRECATED_NAME); + metadata.putSection(TrainedModelAssignmentMetadata.NAME, 
modelAssignments.build()) + .removeSection(TrainedModelAssignmentMetadata.DEPRECATED_NAME); } else { - metadata.putCustom(TrainedModelAssignmentMetadata.DEPRECATED_NAME, modelAssignments.buildOld()); + metadata.putSection(TrainedModelAssignmentMetadata.DEPRECATED_NAME, modelAssignments.buildOld()); } return ClusterState.builder(currentState).metadata(metadata).build(); } @@ -1113,26 +1113,26 @@ static Optional detectReasonToRebalanceModels(final ClusterChangedEvent } static Optional detectReasonIfMlJobsStopped(ClusterChangedEvent event) { - if (event.changedCustomMetadataSet().contains(PersistentTasksCustomMetadata.TYPE) == false) { + if (event.changedCustomMetadataSet().contains(PersistentTasksMetadataSection.TYPE) == false) { return Optional.empty(); } - PersistentTasksCustomMetadata previousPersistentTasks = PersistentTasksCustomMetadata.getPersistentTasksCustomMetadata( + PersistentTasksMetadataSection previousPersistentTasks = PersistentTasksMetadataSection.getPersistentTasksCustomMetadata( event.previousState() ); if (previousPersistentTasks == null) { // no previous jobs so nothing has stopped return Optional.empty(); } - PersistentTasksCustomMetadata currentPersistentTasks = PersistentTasksCustomMetadata.getPersistentTasksCustomMetadata( + PersistentTasksMetadataSection currentPersistentTasks = PersistentTasksMetadataSection.getPersistentTasksCustomMetadata( event.state() ); Set currentMlTaskIds = findMlProcessTaskIds(currentPersistentTasks); - Set> previousMlTasks = MlTasks.findMlProcessTasks(previousPersistentTasks); + Set> previousMlTasks = MlTasks.findMlProcessTasks(previousPersistentTasks); Set stoppedTaskTypes = previousMlTasks.stream() .filter(task -> currentMlTaskIds.contains(task.getId()) == false) // remove the tasks that are still present. Stopped Ids only. - .map(PersistentTasksCustomMetadata.PersistentTask::getTaskName) + .map(PersistentTasksMetadataSection.PersistentTask::getTaskName) .map(MlTasks::prettyPrintTaskName) .collect(Collectors.toSet()); if (stoppedTaskTypes.size() == 1) { @@ -1143,12 +1143,12 @@ static Optional detectReasonIfMlJobsStopped(ClusterChangedEvent event) { return Optional.empty(); } - private static Set findMlProcessTaskIds(@Nullable PersistentTasksCustomMetadata metadata) { + private static Set findMlProcessTaskIds(@Nullable PersistentTasksMetadataSection metadata) { return metadata == null ? Set.of() : MlTasks.findMlProcessTasks(metadata) .stream() - .map(PersistentTasksCustomMetadata.PersistentTask::getId) + .map(PersistentTasksMetadataSection.PersistentTask::getId) .collect(Collectors.toSet()); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java index deb645ff96133..2515b88f5f19d 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingService.java @@ -766,7 +766,7 @@ public void clusterChanged(ClusterChangedEvent event) { } ClusterState state = event.state(); - IngestMetadata currentIngestMetadata = state.metadata().custom(IngestMetadata.TYPE); + IngestMetadata currentIngestMetadata = state.metadata().section(IngestMetadata.TYPE); Set allReferencedModelKeys = event.changedCustomMetadataSet().contains(IngestMetadata.TYPE) ? 
countInferenceProcessors(currentIngestMetadata) : new HashSet<>(referencedModels); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelCacheMetadataService.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelCacheMetadataService.java index bd7510a09a013..72c3212fa25dd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelCacheMetadataService.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/inference/persistence/TrainedModelCacheMetadataService.java @@ -124,7 +124,7 @@ public ClusterState execute(BatchExecutionContext batch } return ClusterState.builder(initialState) - .metadata(Metadata.builder(initialState.metadata()).putCustom(TrainedModelCacheMetadata.NAME, currentCacheMetadata)) + .metadata(Metadata.builder(initialState.metadata()).putSection(TrainedModelCacheMetadata.NAME, currentCacheMetadata)) .build(); } } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java index 9887152c6f311..9df36337dcb74 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobManager.java @@ -24,7 +24,7 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Nullable; import org.elasticsearch.index.analysis.AnalysisRegistry; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xcontent.NamedXContentRegistry; @@ -536,13 +536,13 @@ private void auditJobUpdatedIfNotInternal(UpdateJobAction.Request request) { } private static boolean isJobOpen(ClusterState clusterState, String jobId) { - PersistentTasksCustomMetadata persistentTasks = clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = clusterState.metadata().section(PersistentTasksMetadataSection.TYPE); JobState jobState = MlTasks.getJobState(jobId, persistentTasks); return jobState == JobState.OPENED; } private static Set openJobIds(ClusterState clusterState) { - PersistentTasksCustomMetadata persistentTasks = clusterState.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = clusterState.metadata().section(PersistentTasksMetadataSection.TYPE); return MlTasks.openJobIds(persistentTasks); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobNodeSelector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobNodeSelector.java index a24e671d1fe25..5277860489edd 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobNodeSelector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/JobNodeSelector.java @@ -13,7 +13,7 @@ import org.elasticsearch.common.Strings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.ml.MachineLearning; import org.elasticsearch.xpack.ml.autoscaling.NativeMemoryCapacity; @@ -51,7 
+51,7 @@ */ public class JobNodeSelector { - public static final PersistentTasksCustomMetadata.Assignment AWAITING_LAZY_ASSIGNMENT = new PersistentTasksCustomMetadata.Assignment( + public static final PersistentTasksMetadataSection.Assignment AWAITING_LAZY_ASSIGNMENT = new PersistentTasksMetadataSection.Assignment( null, "persistent task is awaiting node assignment." ); @@ -121,7 +121,7 @@ public Tuple currentCapacityAndMaxFreeMemory( return Tuple.tuple(currentCapacityForMl, mostAvailableMemory); } - public PersistentTasksCustomMetadata.Assignment selectNode( + public PersistentTasksMetadataSection.Assignment selectNode( int dynamicMaxOpenJobs, int maxConcurrentJobAllocations, int maxMachineMemoryPercent, @@ -139,7 +139,7 @@ public PersistentTasksCustomMetadata.Assignment selectNode( ); } - public PersistentTasksCustomMetadata.Assignment selectNode( + public PersistentTasksMetadataSection.Assignment selectNode( Long estimatedMemoryFootprint, int dynamicMaxOpenJobs, int maxConcurrentJobAllocations, @@ -151,7 +151,7 @@ public PersistentTasksCustomMetadata.Assignment selectNode( memoryTracker.asyncRefresh(); String reason = "Not opening job [" + jobId + "] because job memory requirements are stale - refresh requested"; logger.debug(reason); - return new PersistentTasksCustomMetadata.Assignment(null, reason); + return new PersistentTasksMetadataSection.Assignment(null, reason); } Map reasons = new TreeMap<>(); long maxAvailableMemory = Long.MIN_VALUE; @@ -274,7 +274,7 @@ public PersistentTasksCustomMetadata.Assignment selectNode( ); } - PersistentTasksCustomMetadata.Assignment createAssignment( + PersistentTasksMetadataSection.Assignment createAssignment( long estimatedMemoryUsage, DiscoveryNode minLoadedNode, Collection reasons, @@ -283,7 +283,7 @@ PersistentTasksCustomMetadata.Assignment createAssignment( ) { if (minLoadedNode == null) { String explanation = String.join("|", reasons); - PersistentTasksCustomMetadata.Assignment currentAssignment = new PersistentTasksCustomMetadata.Assignment(null, explanation); + PersistentTasksMetadataSection.Assignment currentAssignment = new PersistentTasksMetadataSection.Assignment(null, explanation); logger.debug("no node selected for job [{}], reasons [{}]", jobId, explanation); if ((MachineLearning.NATIVE_EXECUTABLE_CODE_OVERHEAD.getBytes() + estimatedMemoryUsage) > mostAvailableMemoryForML) { String message = format( @@ -296,16 +296,16 @@ PersistentTasksCustomMetadata.Assignment createAssignment( List newReasons = new ArrayList<>(reasons); newReasons.add(message); explanation = String.join("|", newReasons); - return new PersistentTasksCustomMetadata.Assignment(null, explanation); + return new PersistentTasksMetadataSection.Assignment(null, explanation); } return considerLazyAssignment(currentAssignment, maxNodeSize); } logger.debug("selected node [{}] for job [{}]", minLoadedNode, jobId); - return new PersistentTasksCustomMetadata.Assignment(minLoadedNode.getId(), ""); + return new PersistentTasksMetadataSection.Assignment(minLoadedNode.getId(), ""); } - PersistentTasksCustomMetadata.Assignment considerLazyAssignment( - PersistentTasksCustomMetadata.Assignment currentAssignment, + PersistentTasksMetadataSection.Assignment considerLazyAssignment( + PersistentTasksMetadataSection.Assignment currentAssignment, long maxNodeSize ) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java index f2bf180943b82..9f432c272da29 100644 
--- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/NodeLoadDetector.java @@ -11,7 +11,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.common.Strings; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingInfo; import org.elasticsearch.xpack.core.ml.inference.assignment.RoutingState; @@ -87,7 +87,7 @@ public NodeLoad detectNodeLoad( int maxMachineMemoryPercent, boolean useAutoMachineMemoryCalculation ) { - PersistentTasksCustomMetadata persistentTasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); Map nodeAttributes = node.getAttributes(); List errors = new ArrayList<>(); OptionalLong maxMlMemory = NativeMemoryCalculator.allowedBytesForMl(node, maxMachineMemoryPercent, useAutoMachineMemoryCalculation); @@ -119,13 +119,13 @@ public NodeLoad detectNodeLoad( return nodeLoad.build(); } - private void updateLoadGivenTasks(NodeLoad.Builder nodeLoad, PersistentTasksCustomMetadata persistentTasks) { + private void updateLoadGivenTasks(NodeLoad.Builder nodeLoad, PersistentTasksMetadataSection persistentTasks) { if (persistentTasks != null) { - Collection> memoryTrackedTasks = findAllMemoryTrackedTasks( + Collection> memoryTrackedTasks = findAllMemoryTrackedTasks( persistentTasks, nodeLoad.getNodeId() ); - for (PersistentTasksCustomMetadata.PersistentTask task : memoryTrackedTasks) { + for (PersistentTasksMetadataSection.PersistentTask task : memoryTrackedTasks) { MemoryTrackedTaskState state = MlTasks.getMemoryTrackedTaskState(task); assert state != null : "null MemoryTrackedTaskState for memory tracked task with params " + task.getParams(); if (state != null && state.consumesMemory()) { @@ -153,8 +153,8 @@ private static void updateLoadGivenModelAssignments( } } - private static Collection> findAllMemoryTrackedTasks( - PersistentTasksCustomMetadata persistentTasks, + private static Collection> findAllMemoryTrackedTasks( + PersistentTasksMetadataSection persistentTasks, String nodeId ) { return persistentTasks.tasks() @@ -164,7 +164,7 @@ private static Collection> findA .collect(Collectors.toList()); } - private static boolean isMemoryTrackedTask(PersistentTasksCustomMetadata.PersistentTask task) { + private static boolean isMemoryTrackedTask(PersistentTasksMetadataSection.PersistentTask task) { return MlTasks.JOB_TASK_NAME.equals(task.getTaskName()) || MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME.equals(task.getTaskName()) || MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME.equals(task.getTaskName()); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java index 8493513f40bd6..f219fbc20af0e 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/persistence/JobConfigProvider.java @@ -43,7 +43,7 @@ import org.elasticsearch.index.query.TermQueryBuilder; import org.elasticsearch.index.query.TermsQueryBuilder; import 
org.elasticsearch.index.query.WildcardQueryBuilder; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.SearchHit; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.fetch.subphase.FetchSourceContext; @@ -488,7 +488,7 @@ public void expandJobsIds( String expression, boolean allowNoMatch, boolean excludeDeleting, - @Nullable PersistentTasksCustomMetadata tasksCustomMetadata, + @Nullable PersistentTasksMetadataSection tasksCustomMetadata, boolean allowMissingConfigs, @Nullable TaskId parentTaskId, ActionListener> listener @@ -547,10 +547,10 @@ public void expandJobsIds( /** * The same logic as - * {@link #expandJobsIds(String, boolean, boolean, PersistentTasksCustomMetadata, boolean, TaskId, ActionListener)} but + * {@link #expandJobsIds(String, boolean, boolean, PersistentTasksMetadataSection, boolean, TaskId, ActionListener)} but * the full anomaly detector job configuration is returned. * - * See {@link #expandJobsIds(String, boolean, boolean, PersistentTasksCustomMetadata, boolean, TaskId, ActionListener)} + * See {@link #expandJobsIds(String, boolean, boolean, PersistentTasksMetadataSection, boolean, TaskId, ActionListener)} * * @param expression the expression to resolve * @param allowNoMatch if {@code false}, an error is thrown when no name matches the {@code expression}. @@ -620,7 +620,7 @@ public void expandJobs( /** * Expands the list of job group Ids to the set of jobs which are members of the groups. - * Unlike {@link #expandJobsIds(String, boolean, boolean, PersistentTasksCustomMetadata, boolean, TaskId, ActionListener)} it is not an + * Unlike {@link #expandJobsIds(String, boolean, boolean, PersistentTasksMetadataSection, boolean, TaskId, ActionListener)} it is not an * error if a group Id does not exist. * Wildcard expansion of group Ids is not supported. 
* @@ -754,7 +754,7 @@ public void validateDatafeedJob(DatafeedConfig config, ActionListener l }, listener::onFailure)); } - static Collection matchingJobIdsWithTasks(String[] jobIdPatterns, PersistentTasksCustomMetadata tasksMetadata) { + static Collection matchingJobIdsWithTasks(String[] jobIdPatterns, PersistentTasksMetadataSection tasksMetadata) { return MlStrings.findMatching(jobIdPatterns, MlTasks.openJobIds(tasksMetadata)); } diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java index d69acab30451a..03f4fc0a25c98 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/process/autodetect/JobModelSnapshotUpgrader.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.util.concurrent.FutureUtils; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.IOUtils; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.job.config.AnalysisConfig; diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradePredicate.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradePredicate.java index 371575eeabee8..7879640c76a55 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradePredicate.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradePredicate.java @@ -9,7 +9,7 @@ import org.apache.logging.log4j.Logger; import org.elasticsearch.ElasticsearchException; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeState; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskParams; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeTaskState; @@ -20,7 +20,7 @@ import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutor.checkAssignmentState; -public class SnapshotUpgradePredicate implements Predicate> { +public class SnapshotUpgradePredicate implements Predicate> { private final boolean waitForCompletion; private final Logger logger; private volatile Exception exception; @@ -50,7 +50,7 @@ public boolean isCompleted() { } @Override - public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public boolean test(PersistentTasksMetadataSection.PersistentTask persistentTask) { // Persistent task being null means it has been removed from state, and is now complete if (persistentTask == null) { isCompleted = true; @@ -61,7 +61,7 @@ public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTa ? SnapshotUpgradeState.STOPPED : snapshotUpgradeTaskState.getState(); String reason = snapshotUpgradeTaskState == null ? 
"" : snapshotUpgradeTaskState.getReason(); - PersistentTasksCustomMetadata.Assignment assignment = persistentTask.getAssignment(); + PersistentTasksMetadataSection.Assignment assignment = persistentTask.getAssignment(); // This logic is only appropriate when opening a job, not when reallocating following a failure, // and this is why this class must only be used when opening a job SnapshotUpgradeTaskParams params = (SnapshotUpgradeTaskParams) persistentTask.getParams(); diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java index cc3f8f0dd1e67..0ddc5c609fc83 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradeTaskExecutor.java @@ -21,7 +21,7 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.xpack.core.ml.MlConfigIndex; @@ -88,13 +88,13 @@ public SnapshotUpgradeTaskExecutor( } @Override - public PersistentTasksCustomMetadata.Assignment getAssignment( + public PersistentTasksMetadataSection.Assignment getAssignment( SnapshotUpgradeTaskParams params, Collection candidateNodes, ClusterState clusterState ) { boolean isMemoryTrackerRecentlyRefreshed = memoryTracker.isRecentlyRefreshed(); - Optional optionalAssignment = getPotentialAssignment( + Optional optionalAssignment = getPotentialAssignment( params, clusterState, isMemoryTrackerRecentlyRefreshed @@ -232,7 +232,7 @@ protected AllocatedPersistentTask createTask( String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask persistentTask, + PersistentTasksMetadataSection.PersistentTask persistentTask, Map headers ) { return new SnapshotUpgradeTask( diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java index 89180cba77dfd..7dedb29b866ae 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutor.java @@ -27,8 +27,8 @@ import org.elasticsearch.license.XPackLicenseState; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; import org.elasticsearch.persistent.decider.EnableAssignmentDecider; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.TaskId; @@ -224,7 +224,7 @@ public void validate(OpenJobAction.JobParams params, ClusterState clusterState) validateJobAndId(jobId, job); // If we already know that 
we can't find an ml node because all ml nodes are running at capacity or // simply because there are no ml nodes in the cluster then we fail quickly here: - PersistentTasksCustomMetadata.Assignment assignment = getAssignment(params, clusterState.nodes().getAllNodes(), clusterState); + PersistentTasksMetadataSection.Assignment assignment = getAssignment(params, clusterState.nodes().getAllNodes(), clusterState); if (assignment.equals(AWAITING_UPGRADE)) { throw makeCurrentlyBeingUpgradedException(logger, params.getJobId()); } @@ -406,8 +406,8 @@ private void getRunningDatafeed(String jobId, ActionListener listener) { } String datafeedId = datafeeds.iterator().next(); - PersistentTasksCustomMetadata tasks = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); - PersistentTasksCustomMetadata.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); + PersistentTasksMetadataSection tasks = clusterState.getMetadata().section(PersistentTasksMetadataSection.TYPE); + PersistentTasksMetadataSection.PersistentTask datafeedTask = MlTasks.getDatafeedTask(datafeedId, tasks); delegate.onResponse(datafeedTask != null ? datafeedId : null); }); @@ -612,19 +612,19 @@ protected AllocatedPersistentTask createTask( String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask persistentTask, + PersistentTasksMetadataSection.PersistentTask persistentTask, Map headers ) { return new JobTask(persistentTask.getParams().getJobId(), id, type, action, parentTaskId, headers, licenseState); } public static Optional checkAssignmentState( - PersistentTasksCustomMetadata.Assignment assignment, + PersistentTasksMetadataSection.Assignment assignment, String jobId, Logger logger ) { if (assignment != null - && assignment.equals(PersistentTasksCustomMetadata.INITIAL_ASSIGNMENT) == false + && assignment.equals(PersistentTasksMetadataSection.INITIAL_ASSIGNMENT) == false && assignment.isAssigned() == false) { // Assignment has failed on the master node despite passing our "fast fail" validation if (assignment.equals(AWAITING_UPGRADE)) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java index d1f1d0d506c85..86e02eaa3d1f8 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/process/MlMemoryTracker.java @@ -18,7 +18,7 @@ import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.core.TimeValue; import org.elasticsearch.persistent.PersistentTasksClusterService; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.OpenJobAction; @@ -333,7 +333,7 @@ public boolean asyncRefresh() { e -> logIfNecessary(() -> logger.warn("Failed to refresh job memory requirements", e)) ); threadPool.executor(MachineLearning.UTILITY_THREAD_POOL_NAME) - .execute(() -> refresh(clusterService.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE), listener)); + .execute(() -> refresh(clusterService.state().getMetadata().section(PersistentTasksMetadataSection.TYPE), listener)); return true; } catch (EsRejectedExecutionException e) { logger.warn("Couldn't schedule ML memory 
update - node might be shutting down", e); @@ -364,7 +364,7 @@ public void refreshAnomalyDetectorJobMemoryAndAllOthers(String jobId, ActionList // Skip the provided job ID in the main refresh, as we unconditionally do it at the end. // Otherwise it might get refreshed twice, because it could have both a job task and a snapshot upgrade task. refresh( - clusterService.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE), + clusterService.state().getMetadata().section(PersistentTasksMetadataSection.TYPE), Collections.singleton(jobId), listener.delegateFailureAndWrap((l, aVoid) -> refreshAnomalyDetectorJobMemory(jobId, l)) ); @@ -389,7 +389,7 @@ public void addDataFrameAnalyticsJobMemoryAndRefreshAllOthers(String id, long me memoryRequirementByDataFrameAnalyticsJob.put(id, mem + DataFrameAnalyticsConfig.PROCESS_MEMORY_OVERHEAD.getBytes()); - PersistentTasksCustomMetadata persistentTasks = clusterService.state().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = clusterService.state().getMetadata().section(PersistentTasksMetadataSection.TYPE); refresh(persistentTasks, listener); } @@ -399,11 +399,11 @@ public void addDataFrameAnalyticsJobMemoryAndRefreshAllOthers(String id, long me * to a race where a job was opened part way through the refresh. (Instead, entries are removed when * jobs are deleted.) */ - public void refresh(PersistentTasksCustomMetadata persistentTasks, ActionListener onCompletion) { + public void refresh(PersistentTasksMetadataSection persistentTasks, ActionListener onCompletion) { refresh(persistentTasks, Collections.emptySet(), onCompletion); } - void refresh(PersistentTasksCustomMetadata persistentTasks, Set jobIdsToSkip, ActionListener onCompletion) { + void refresh(PersistentTasksMetadataSection persistentTasks, Set jobIdsToSkip, ActionListener onCompletion) { synchronized (fullRefreshCompletionListeners) { fullRefreshCompletionListeners.add(onCompletion); @@ -448,7 +448,7 @@ void refresh(PersistentTasksCustomMetadata persistentTasks, Set jobIdsTo if (persistentTasks == null) { refreshComplete.onResponse(null); } else { - List> mlDataFrameAnalyticsJobTasks = persistentTasks.tasks() + List> mlDataFrameAnalyticsJobTasks = persistentTasks.tasks() .stream() .filter(task -> MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME.equals(task.getTaskName())) .toList(); @@ -491,7 +491,7 @@ private void iterateAnomalyDetectorJobs(Iterator iterator, ActionListene } private void refreshAllDataFrameAnalyticsJobTasks( - List> mlDataFrameAnalyticsJobTasks, + List> mlDataFrameAnalyticsJobTasks, ActionListener listener ) { if (mlDataFrameAnalyticsJobTasks.isEmpty()) { diff --git a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/task/AbstractJobPersistentTasksExecutor.java b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/task/AbstractJobPersistentTasksExecutor.java index 32543b45259c2..550b355dbc095 100644 --- a/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/task/AbstractJobPersistentTasksExecutor.java +++ b/x-pack/plugin/ml/src/main/java/org/elasticsearch/xpack/ml/task/AbstractJobPersistentTasksExecutor.java @@ -21,8 +21,8 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; import org.elasticsearch.persistent.PersistentTaskParams; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import 
org.elasticsearch.xpack.core.common.notifications.AbstractAuditor; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.job.messages.Messages; @@ -124,7 +124,7 @@ protected String getUniqueId(String jobId) { protected void auditRequireMemoryIfNecessary( String jobId, AbstractAuditor auditor, - PersistentTasksCustomMetadata.Assignment assignment, + PersistentTasksMetadataSection.Assignment assignment, JobNodeSelector jobNodeSelector, boolean isMemoryTrackerRecentlyRefreshed ) { @@ -164,7 +164,7 @@ protected boolean allowsMissingIndices() { return true; } - public Optional getPotentialAssignment( + public Optional getPotentialAssignment( Params params, ClusterState clusterState, boolean isMemoryTrackerRecentlyRefreshed @@ -178,7 +178,7 @@ public Optional getPotentialAssignment } String jobId = getJobId(params); - Optional missingIndices = checkRequiredIndices( + Optional missingIndices = checkRequiredIndices( jobId, clusterState, indicesOfInterest(params) @@ -186,7 +186,7 @@ public Optional getPotentialAssignment if (missingIndices.isPresent()) { return missingIndices; } - Optional staleMemory = checkMemoryFreshness(jobId, isMemoryTrackerRecentlyRefreshed); + Optional staleMemory = checkMemoryFreshness(jobId, isMemoryTrackerRecentlyRefreshed); if (staleMemory.isPresent()) { return staleMemory; } @@ -217,7 +217,7 @@ void setMaxNodeSize(ByteSizeValue maxNodeSize) { this.maxNodeMemory = maxNodeSize.getBytes(); } - public Optional checkRequiredIndices( + public Optional checkRequiredIndices( String jobId, ClusterState clusterState, String... indicesOfInterest @@ -235,18 +235,21 @@ public Optional checkRequiredIndices( + String.join(",", unavailableIndices) + "]"; logger.debug(reason); - return Optional.of(new PersistentTasksCustomMetadata.Assignment(null, reason)); + return Optional.of(new PersistentTasksMetadataSection.Assignment(null, reason)); } return Optional.empty(); } - public Optional checkMemoryFreshness(String jobId, boolean isMemoryTrackerRecentlyRefreshed) { + public Optional checkMemoryFreshness( + String jobId, + boolean isMemoryTrackerRecentlyRefreshed + ) { if (isMemoryTrackerRecentlyRefreshed == false) { boolean scheduledRefresh = memoryTracker.asyncRefresh(); if (scheduledRefresh) { String reason = "Not opening job [" + jobId + "] because job memory requirements are stale - refresh requested"; logger.debug(reason); - return Optional.of(new PersistentTasksCustomMetadata.Assignment(null, reason)); + return Optional.of(new PersistentTasksMetadataSection.Assignment(null, reason)); } } return Optional.empty(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java index c35b9da7b2bd2..78fc421aaafa6 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MachineLearningTests.java @@ -107,7 +107,7 @@ public void testPrePostSystemIndexUpgrade_givenAlreadyInUpgradeMode() throws IOE ClusterService clusterService = mock(ClusterService.class); when(clusterService.state()).thenReturn( ClusterState.builder(ClusterName.DEFAULT) - .metadata(Metadata.builder().putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(true).build())) + .metadata(Metadata.builder().putSection(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(true).build())) .build() ); Client client = mock(Client.class); diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java index 4e5e9f99fc306..3f0813870f570 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlAssignmentNotifierTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.transport.TransportAddress; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.job.config.JobState; @@ -75,13 +75,13 @@ public void testClusterChanged_assign() { ClusterState previous = ClusterState.builder(new ClusterName("_name")) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, new PersistentTasksCustomMetadata(0L, Collections.emptyMap())) + .putSection(PersistentTasksMetadataSection.TYPE, new PersistentTasksMetadataSection(0L, Collections.emptyMap())) ) .build(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job_id", "_node_id", null, tasksBuilder); - Metadata metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build(); + Metadata metadata = Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()).build(); ClusterState newState = ClusterState.builder(new ClusterName("_name")) .metadata(metadata) // set local node master @@ -122,9 +122,9 @@ public void testClusterChanged_unassign() { clusterService ); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job_id", "_node_id", null, tasksBuilder); - Metadata metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build(); + Metadata metadata = Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()).build(); ClusterState previous = ClusterState.builder(new ClusterName("_name")) .metadata(metadata) // set local node master @@ -136,9 +136,9 @@ public void testClusterChanged_unassign() { ) .build(); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job_id", null, null, tasksBuilder); - metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build(); + metadata = Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()).build(); ClusterState newState = ClusterState.builder(new ClusterName("_name")) .metadata(metadata) // set local node master @@ -180,9 +180,9 @@ public void testClusterChanged_noPersistentTaskChanges() { clusterService ); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job_id", null, null, tasksBuilder); - Metadata metadata = 
Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build(); + Metadata metadata = Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()).build(); ClusterState previous = ClusterState.builder(new ClusterName("_name")).metadata(metadata).build(); ClusterState newState = ClusterState.builder(new ClusterName("_name")) @@ -219,9 +219,9 @@ public void testAuditUnassignedMlTasks() { clusterService ); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job_id", null, null, tasksBuilder); - Metadata metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build(); + Metadata metadata = Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()).build(); ClusterState newState = ClusterState.builder(new ClusterName("_name")) .metadata(metadata) // set local node master @@ -232,7 +232,7 @@ public void testAuditUnassignedMlTasks() { .masterNodeId("_node_id") ) .build(); - notifier.auditUnassignedMlTasks(newState.nodes(), newState.metadata().custom(PersistentTasksCustomMetadata.TYPE)); + notifier.auditUnassignedMlTasks(newState.nodes(), newState.metadata().section(PersistentTasksMetadataSection.TYPE)); if (anomalyDetectionAuditor.includeNodeInfo()) { verify(anomalyDetectionAuditor, times(1)).warning("job_id", "No node found to open job. Reasons [test assignment]"); } else { @@ -255,7 +255,7 @@ public void testFindLongTimeUnassignedTasks() { Instant twoHoursAgo = sevenHoursAgo.plus(Duration.ofHours(5)); Instant tomorrow = now.plus(Duration.ofHours(24)); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job1", "node1", JobState.OPENED, tasksBuilder); addJobTask("job2", "node1", JobState.OPENED, tasksBuilder); addJobTask("job3", null, JobState.OPENED, tasksBuilder); @@ -265,7 +265,7 @@ public void testFindLongTimeUnassignedTasks() { // Nothing reported because unassigned jobs only just detected assertThat(itemsToReport, empty()); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job1", null, JobState.OPENED, tasksBuilder); addJobTask("job2", "node1", JobState.OPENED, tasksBuilder); addJobTask("job3", null, JobState.OPENED, tasksBuilder); @@ -278,7 +278,7 @@ public void testFindLongTimeUnassignedTasks() { containsInAnyOrder("[xpack/ml/job]/[job3] unassigned for [3600] seconds", "[xpack/ml/job]/[job5] unassigned for [3600] seconds") ); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job1", null, JobState.OPENED, tasksBuilder); addJobTask("job2", null, JobState.OPENED, tasksBuilder); addJobTask("job3", null, JobState.OPENED, tasksBuilder); @@ -289,7 +289,7 @@ public void testFindLongTimeUnassignedTasks() { // job 2 only just detected unassigned assertThat(itemsToReport, contains("[xpack/ml/job]/[job1] unassigned for [18000] seconds")); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job1", null, JobState.OPENED, tasksBuilder); addJobTask("job2", null, JobState.OPENED, tasksBuilder); addJobTask("job3", null, JobState.OPENED, 
tasksBuilder); @@ -306,7 +306,7 @@ public void testFindLongTimeUnassignedTasks() { ) ); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job1", null, JobState.FAILED, tasksBuilder); addJobTask("job2", null, JobState.FAILED, tasksBuilder); addJobTask("job3", null, JobState.FAILED, tasksBuilder); @@ -331,14 +331,14 @@ public void testFindLongTimeUnassignedTasks_WithNullState() { { // run once with valid state to add unassigned job to the history - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job1", null, JobState.OPENED, tasksBuilder); List itemsToReport = notifier.findLongTimeUnassignedTasks(eightHoursAgo, tasksBuilder.build()); // Nothing reported because unassigned jobs only just detected assertThat(itemsToReport, empty()); } { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job1", null, null, tasksBuilder); // this time the job has no state // one hour later the job would be detected as unassigned if not for the missing state List itemsToReport = notifier.findLongTimeUnassignedTasks(sevenHoursAgo, tasksBuilder.build()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java index f79dd645bfea5..cad26c48ae0c4 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlDailyMaintenanceServiceTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskInfo; import org.elasticsearch.test.ESTestCase; @@ -332,8 +332,8 @@ private static ClusterState createClusterState(boolean isUpgradeMode) { return ClusterState.builder(new ClusterName("MlDailyMaintenanceServiceTests")) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, PersistentTasksCustomMetadata.builder().build()) - .putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(isUpgradeMode).build()) + .putSection(PersistentTasksMetadataSection.TYPE, PersistentTasksMetadataSection.builder().build()) + .putSection(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(isUpgradeMode).build()) ) .nodes(DiscoveryNodes.builder().build()) .build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java index bdabb42ecd467..e614fcf1219ab 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlLifeCycleServiceTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.transport.TransportAddress; 
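The test hunks in this part of the diff all rebuild cluster state the same way; after the rename the fixture reduces to the sketch below. This is an illustrative sketch only (the helper class and method names are invented, not part of the change set), and it assumes PersistentTasksMetadataSection keeps the builder(), addTask(...), Assignment and TYPE API of the class it replaces, which is what the surrounding hunks show.

    import org.elasticsearch.cluster.ClusterName;
    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.metadata.Metadata;
    import org.elasticsearch.persistent.PersistentTasksMetadataSection;
    import org.elasticsearch.xpack.core.ml.MlTasks;
    import org.elasticsearch.xpack.core.ml.action.OpenJobAction;

    // Hypothetical helper illustrating the fixture pattern these ML tests share.
    final class MlTestClusterStateFixture {
        private MlTestClusterStateFixture() {}

        // Builds a cluster state containing a single anomaly detection job task assigned to nodeId.
        static ClusterState clusterStateWithJobTask(String jobId, String nodeId) {
            PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder();
            tasksBuilder.addTask(
                MlTasks.jobTaskId(jobId),
                MlTasks.JOB_TASK_NAME,
                new OpenJobAction.JobParams(jobId),
                // Same explanation string the tests in this diff use for their assignments.
                new PersistentTasksMetadataSection.Assignment(nodeId, "test assignment")
            );
            // putSection(...) is the renamed putCustom(...): the section is still registered under its TYPE key.
            Metadata metadata = Metadata.builder()
                .putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())
                .build();
            return ClusterState.builder(new ClusterName("_name")).metadata(metadata).build();
        }
    }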
-import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -78,34 +78,34 @@ public void setupMocks() { } public void testIsNodeSafeToShutdown() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); tasksBuilder.addTask( MlTasks.jobTaskId("job-1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("job-1"), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.addTask( MlTasks.datafeedTaskId("df1"), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams("df1", 0L), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.addTask( MlTasks.dataFrameAnalyticsTaskId("job-2"), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new StartDataFrameAnalyticsAction.TaskParams("foo-2", MlConfigVersion.CURRENT, true), - new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-2", "test assignment") ); tasksBuilder.addTask( MlTasks.snapshotUpgradeTaskId("job-3", "snapshot-3"), MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, new SnapshotUpgradeTaskParams("job-3", "snapshot-3"), - new PersistentTasksCustomMetadata.Assignment("node-3", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-3", "test assignment") ); - Metadata metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build(); + Metadata metadata = Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()).build(); ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metadata(metadata).build(); Instant shutdownStartTime = Instant.now(); @@ -139,20 +139,20 @@ public void testIsNodeSafeToShutdown() { } public void testIsNodeSafeToShutdownGivenFailedTasks() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); tasksBuilder.addTask( MlTasks.jobTaskId("job-1"), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams("job-1"), - new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-1", "test assignment") ); tasksBuilder.updateTaskState(MlTasks.jobTaskId("job-1"), new JobTaskState(JobState.FAILED, 1, "testing", Instant.now())); tasksBuilder.addTask( MlTasks.dataFrameAnalyticsTaskId("job-2"), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new StartDataFrameAnalyticsAction.TaskParams("foo-2", MlConfigVersion.CURRENT, true), - new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-2", "test assignment") ); tasksBuilder.updateTaskState( MlTasks.dataFrameAnalyticsTaskId("job-2"), @@ -162,14 +162,14 @@ public void testIsNodeSafeToShutdownGivenFailedTasks() { MlTasks.snapshotUpgradeTaskId("job-3", "snapshot-3"), MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, new SnapshotUpgradeTaskParams("job-3", "snapshot-3"), - new 
PersistentTasksCustomMetadata.Assignment("node-3", "test assignment") + new PersistentTasksMetadataSection.Assignment("node-3", "test assignment") ); tasksBuilder.updateTaskState( MlTasks.snapshotUpgradeTaskId("job-3", "snapshot-3"), new SnapshotUpgradeTaskState(SnapshotUpgradeState.FAILED, 3, "testing") ); - Metadata metadata = Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()).build(); + Metadata metadata = Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()).build(); ClusterState clusterState = ClusterState.builder(ClusterState.EMPTY_STATE).metadata(metadata).build(); // For these tests it shouldn't matter when shutdown started or what the time is now, because it's always safe to shut down @@ -186,7 +186,7 @@ public void testIsNodeSafeToShutdownReturnsFalseWhenStartingDeploymentExists() { ClusterState currentState = ClusterState.builder(new ClusterName("test")) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -210,7 +210,7 @@ public void testIsNodeSafeToShutdownReturnsFalseWhenStoppingAndStoppedDeployment ClusterState currentState = ClusterState.builder(new ClusterName("test")) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -239,7 +239,7 @@ public void testIsNodeSafeToShutdownReturnsTrueWhenStoppedDeploymentsExist() { ClusterState currentState = ClusterState.builder(new ClusterName("test")) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java index 5fb1381b881ea..d11d816a138f1 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlMetricsTests.java @@ -12,7 +12,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -45,7 +45,7 @@ public class MlMetricsTests extends ESTestCase { public void testFindTaskStatuses() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); MlMemoryAutoscalingDeciderTests.addJobTask("job1", "node1", JobState.OPENED, tasksBuilder); MlMemoryAutoscalingDeciderTests.addJobTask("job2", "node1", JobState.OPENED, tasksBuilder); MlMemoryAutoscalingDeciderTests.addJobTask("job3", "node2", JobState.FAILED, tasksBuilder); @@ -86,7 +86,7 @@ public void testFindTaskStatuses() { public void testFindDfaMemoryUsage() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa1", "node1", 
DataFrameAnalyticsState.STARTED, tasksBuilder); MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa2", "node2", DataFrameAnalyticsState.STARTED, tasksBuilder); MlMemoryAutoscalingDeciderTests.addAnalyticsTask("dfa3", "node1", DataFrameAnalyticsState.FAILED, tasksBuilder); @@ -168,13 +168,13 @@ public static void addDatafeedTask( String datafeedId, String nodeId, DatafeedState datafeedState, - PersistentTasksCustomMetadata.Builder builder + PersistentTasksMetadataSection.Builder builder ) { builder.addTask( MlTasks.datafeedTaskId(datafeedId), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams(datafeedId, System.currentTimeMillis()), - nodeId == null ? AWAITING_LAZY_ASSIGNMENT : new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment") + nodeId == null ? AWAITING_LAZY_ASSIGNMENT : new PersistentTasksMetadataSection.Assignment(nodeId, "test assignment") ); if (datafeedState != null) { builder.updateTaskState(MlTasks.datafeedTaskId(datafeedId), datafeedState); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlUpgradeModeActionFilterTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlUpgradeModeActionFilterTests.java index 3092808dc91f8..89583b3e34627 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlUpgradeModeActionFilterTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/MlUpgradeModeActionFilterTests.java @@ -130,7 +130,7 @@ private static ClusterState createClusterState(boolean isUpgradeMode, boolean is return ClusterState.builder(new ClusterName("MlUpgradeModeActionFilterTests")) .metadata( Metadata.builder() - .putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(isUpgradeMode).isResetMode(isResetMode).build()) + .putSection(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(isUpgradeMode).isResetMode(isResetMode).build()) ) .build(); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java index 67da449af850f..17d81483a888b 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportCloseJobActionTests.java @@ -17,8 +17,8 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; @@ -87,12 +87,12 @@ public void testAddJobAccordingToState() { List closingJobIds = new ArrayList<>(); List failedJobIds = new ArrayList<>(); - PersistentTasksCustomMetadata.Builder taskBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder taskBuilder = PersistentTasksMetadataSection.builder(); addJobTask("open-job", null, JobState.OPENED, taskBuilder); addJobTask("failed-job", null, JobState.FAILED, taskBuilder); addJobTask("closing-job", null, JobState.CLOSING, taskBuilder); addJobTask("opening-job", null, JobState.OPENING, 
taskBuilder); - PersistentTasksCustomMetadata tasks = taskBuilder.build(); + PersistentTasksMetadataSection tasks = taskBuilder.build(); for (String id : new String[] { "open-job", "closing-job", "opening-job", "failed-job" }) { TransportCloseJobAction.addJobAccordingToState(id, tasks, openJobIds, closingJobIds, failedJobIds); @@ -104,7 +104,7 @@ public void testAddJobAccordingToState() { @SuppressWarnings("unchecked") public void testStopDatafeedsIfNecessary() { - final PersistentTasksCustomMetadata.Builder datafeedStartedTaskBuilder = PersistentTasksCustomMetadata.builder(); + final PersistentTasksMetadataSection.Builder datafeedStartedTaskBuilder = PersistentTasksMetadataSection.builder(); String jobId = "job-with-started-df"; String datafeedId = "df1"; addJobTask(jobId, null, JobState.OPENED, datafeedStartedTaskBuilder); @@ -127,7 +127,7 @@ public void testStopDatafeedsIfNecessary() { assertTrue(responseHolder.get()); assertNull(exceptionHolder.get()); - final PersistentTasksCustomMetadata.Builder datafeedNotStartedTaskBuilder = PersistentTasksCustomMetadata.builder(); + final PersistentTasksMetadataSection.Builder datafeedNotStartedTaskBuilder = PersistentTasksMetadataSection.builder(); addJobTask(jobId, null, JobState.OPENED, datafeedNotStartedTaskBuilder); if (randomBoolean()) { addTask(datafeedId, 0L, null, DatafeedState.STOPPED, datafeedNotStartedTaskBuilder); @@ -142,7 +142,7 @@ public void testStopDatafeedsIfNecessary() { @SuppressWarnings("unchecked") public void testStopDatafeedsIfNecessaryWithForce() { - final PersistentTasksCustomMetadata.Builder datafeedStartedTaskBuilder = PersistentTasksCustomMetadata.builder(); + final PersistentTasksMetadataSection.Builder datafeedStartedTaskBuilder = PersistentTasksMetadataSection.builder(); String jobId = "job-with-started-df"; String datafeedId = "df1"; addJobTask(jobId, null, JobState.OPENED, datafeedStartedTaskBuilder); @@ -166,7 +166,7 @@ public void testStopDatafeedsIfNecessaryWithForce() { assertTrue(responseHolder.get()); assertNull(exceptionHolder.get()); - final PersistentTasksCustomMetadata.Builder datafeedNotStartedTaskBuilder = PersistentTasksCustomMetadata.builder(); + final PersistentTasksMetadataSection.Builder datafeedNotStartedTaskBuilder = PersistentTasksMetadataSection.builder(); addJobTask(jobId, null, JobState.OPENED, datafeedNotStartedTaskBuilder); if (randomBoolean()) { addTask(datafeedId, 0L, null, DatafeedState.STOPPED, datafeedNotStartedTaskBuilder); @@ -181,7 +181,7 @@ public void testStopDatafeedsIfNecessaryWithForce() { } public void testValidate_givenFailedJob() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job_id_failed", null, JobState.FAILED, tasksBuilder); mockDatafeedConfigFindDatafeeds(Collections.emptySet()); @@ -211,11 +211,11 @@ public void testValidate_givenFailedJob() { } public void testValidate_withSpecificJobIds() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job_id_closing", null, JobState.CLOSING, tasksBuilder); addJobTask("job_id_open-1", null, JobState.OPENED, tasksBuilder); addJobTask("job_id_open-2", null, JobState.OPENED, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); 
mockDatafeedConfigFindDatafeeds(Collections.emptySet()); @@ -251,11 +251,11 @@ public void testValidate_withSpecificJobIds() { public void testDoExecute_whenNothingToClose() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("foo", null, JobState.CLOSED, tasksBuilder); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); TransportCloseJobAction transportAction = createAction(); @@ -290,7 +290,7 @@ public void testBuildWaitForCloseRequest() { List openJobIds = Arrays.asList("openjob1", "openjob2"); List closingJobIds = Collections.singletonList("closingjob1"); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("openjob1", null, JobState.OPENED, tasksBuilder); addJobTask("openjob2", null, JobState.OPENED, tasksBuilder); addJobTask("closingjob1", null, JobState.CLOSING, tasksBuilder); @@ -326,7 +326,7 @@ public static void addTask( long startTime, String nodeId, DatafeedState state, - PersistentTasksCustomMetadata.Builder tasks + PersistentTasksMetadataSection.Builder tasks ) { tasks.addTask( MlTasks.datafeedTaskId(datafeedId), diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java index 64d1414134f38..f32733322bf36 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDataFrameAnalyticsActionTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MachineLearningField; @@ -59,7 +59,7 @@ public void testGetAssignment_UpgradeModeIsEnabled() { TaskExecutor executor = createTaskExecutor(); TaskParams params = new TaskParams(JOB_ID, MlConfigVersion.CURRENT, false); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(true).build())) + .metadata(Metadata.builder().putSection(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(true).build())) .build(); Assignment assignment = executor.getAssignment(params, clusterState.nodes().getAllNodes(), clusterState); @@ -72,7 +72,7 @@ public void testGetAssignment_NoNodes() { TaskExecutor executor = createTaskExecutor(); TaskParams params = new TaskParams(JOB_ID, MlConfigVersion.CURRENT, false); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(MlMetadata.TYPE, new 
MlMetadata.Builder().build())) + .metadata(Metadata.builder().putSection(MlMetadata.TYPE, new MlMetadata.Builder().build())) .build(); Assignment assignment = executor.getAssignment(params, clusterState.nodes().getAllNodes(), clusterState); @@ -85,7 +85,7 @@ public void testGetAssignment_NoMlNodes() { TaskExecutor executor = createTaskExecutor(); TaskParams params = new TaskParams(JOB_ID, MlConfigVersion.CURRENT, false); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(MlMetadata.TYPE, new MlMetadata.Builder().build())) + .metadata(Metadata.builder().putSection(MlMetadata.TYPE, new MlMetadata.Builder().build())) .nodes( DiscoveryNodes.builder() .add(createNode(0, false, Version.CURRENT, MlConfigVersion.CURRENT)) @@ -112,7 +112,7 @@ public void testGetAssignment_MlNodeIsNewerThanTheMlJobButTheAssignmentSuceeds() TaskExecutor executor = createTaskExecutor(); TaskParams params = new TaskParams(JOB_ID, MlConfigVersion.V_7_9_0, false); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(MlMetadata.TYPE, new MlMetadata.Builder().build())) + .metadata(Metadata.builder().putSection(MlMetadata.TYPE, new MlMetadata.Builder().build())) .nodes(DiscoveryNodes.builder().add(createNode(0, true, Version.V_7_10_0, MlConfigVersion.V_7_10_0))) .build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java index 8fd1082e0df5c..335497ad432fa 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStartDatafeedActionTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.TransportVersion; import org.elasticsearch.TransportVersions; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.SearchModule; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.test.ESTestCase; @@ -30,7 +30,7 @@ import java.util.Date; import java.util.Map; -import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.INITIAL_ASSIGNMENT; +import static org.elasticsearch.persistent.PersistentTasksMetadataSection.INITIAL_ASSIGNMENT; import static org.elasticsearch.xpack.ml.job.task.OpenJobPersistentTasksExecutorTests.addJobTask; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; @@ -52,7 +52,7 @@ protected NamedXContentRegistry xContentRegistry() { public void testValidate_jobClosed() { Job job1 = DatafeedRunnerTests.createDatafeedJob().build(new Date()); - PersistentTasksCustomMetadata tasks = PersistentTasksCustomMetadata.builder().build(); + PersistentTasksMetadataSection tasks = PersistentTasksMetadataSection.builder().build(); DatafeedConfig datafeedConfig1 = DatafeedRunnerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); Exception e = expectThrows( ElasticsearchStatusException.class, @@ -63,9 +63,9 @@ public void testValidate_jobClosed() { public void testValidate_jobOpening() { Job job1 = DatafeedRunnerTests.createDatafeedJob().build(new Date()); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + 
PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job_id", INITIAL_ASSIGNMENT.getExecutorNode(), null, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); DatafeedConfig datafeedConfig1 = DatafeedRunnerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); TransportStartDatafeedAction.validate(job1, datafeedConfig1, tasks, xContentRegistry()); @@ -73,9 +73,9 @@ public void testValidate_jobOpening() { public void testValidate_jobOpened() { Job job1 = DatafeedRunnerTests.createDatafeedJob().build(new Date()); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job_id", INITIAL_ASSIGNMENT.getExecutorNode(), JobState.OPENED, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); DatafeedConfig datafeedConfig1 = DatafeedRunnerTests.createDatafeedConfig("foo-datafeed", "job_id").build(); TransportStartDatafeedAction.validate(job1, datafeedConfig1, tasks, xContentRegistry()); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsActionTests.java index d1d3338ce14ba..77591cc05717f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDataFrameAnalyticsActionTests.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.ml.action; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -27,7 +27,7 @@ public class TransportStopDataFrameAnalyticsActionTests extends ESTestCase { public void testAnalyticsByTaskState_GivenEmpty() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); AnalyticsByTaskState analyticsByTaskState = AnalyticsByTaskState.build(Collections.emptySet(), tasksBuilder.build()); @@ -35,7 +35,7 @@ public void testAnalyticsByTaskState_GivenEmpty() { } public void testAnalyticsByTaskState_GivenAllStates() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addAnalyticsTask(tasksBuilder, "starting", "foo-node", null); addAnalyticsTask(tasksBuilder, "started", "foo-node", DataFrameAnalyticsState.STARTED); addAnalyticsTask(tasksBuilder, "reindexing", "foo-node", DataFrameAnalyticsState.REINDEXING); @@ -60,7 +60,7 @@ public void testAnalyticsByTaskState_GivenAllStates() { } private static void addAnalyticsTask( - PersistentTasksCustomMetadata.Builder builder, + PersistentTasksMetadataSection.Builder builder, String analyticsId, String nodeId, DataFrameAnalyticsState state @@ -69,7 +69,7 @@ private static void addAnalyticsTask( } private static void addAnalyticsTask( - 
PersistentTasksCustomMetadata.Builder builder, + PersistentTasksMetadataSection.Builder builder, String analyticsId, String nodeId, DataFrameAnalyticsState state, @@ -79,7 +79,7 @@ private static void addAnalyticsTask( MlTasks.dataFrameAnalyticsTaskId(analyticsId), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new StartDataFrameAnalyticsAction.TaskParams(analyticsId, MlConfigVersion.CURRENT, allowLazyStart), - new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment") + new PersistentTasksMetadataSection.Assignment(nodeId, "test assignment") ); if (state != null) { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java index c1dd5ce07e569..8d9282b08df67 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/action/TransportStopDatafeedActionTests.java @@ -6,7 +6,7 @@ */ package org.elasticsearch.xpack.ml.action; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.action.StartDatafeedAction; @@ -19,11 +19,11 @@ public class TransportStopDatafeedActionTests extends ESTestCase { public void testSortDatafeedIdsByTaskState_GivenDatafeedId() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addTask("datafeed_1", 0L, "node-1", DatafeedState.STARTED, tasksBuilder); addTask("datafeed_2", 0L, "node-1", DatafeedState.STOPPED, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); List startedDatafeeds = new ArrayList<>(); List stoppingDatafeeds = new ArrayList<>(); @@ -55,12 +55,12 @@ public void testSortDatafeedIdsByTaskState_GivenDatafeedId() { } public void testSortDatafeedIdsByTaskState_GivenAll() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addTask("datafeed_1", 0L, "node-1", DatafeedState.STARTED, tasksBuilder); addTask("datafeed_2", 0L, "node-1", DatafeedState.STOPPED, tasksBuilder); addTask("datafeed_3", 0L, "node-1", DatafeedState.STOPPING, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); List startedDatafeeds = new ArrayList<>(); List stoppingDatafeeds = new ArrayList<>(); @@ -94,13 +94,13 @@ public static void addTask( long startTime, String nodeId, DatafeedState state, - PersistentTasksCustomMetadata.Builder taskBuilder + PersistentTasksMetadataSection.Builder taskBuilder ) { taskBuilder.addTask( MlTasks.datafeedTaskId(datafeedId), MlTasks.DATAFEED_TASK_NAME, new StartDatafeedAction.DatafeedParams(datafeedId, startTime), - new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment") + new PersistentTasksMetadataSection.Assignment(nodeId, "test assignment") ); taskBuilder.updateTaskState(MlTasks.datafeedTaskId(datafeedId), state); } diff --git 
a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java index 632730bc7f141..bdee92d967e4f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingDeciderServiceTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.TimeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingCapacity; @@ -221,10 +221,10 @@ public void testScale_GivenUndeterminedMemory_ShouldReturnNullCapacity() { service.onMaster(); String jobId = "a_job"; - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask(jobId, randomFrom("ml-1", "ml-2"), JobState.OPENED, tasksBuilder); Metadata.Builder metadata = Metadata.builder(); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()); ClusterState clusterState = ClusterState.builder(new ClusterName("test")) .nodes( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerParameterizedTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerParameterizedTests.java index 229926e0c9afb..929c7ea78b977 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerParameterizedTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerParameterizedTests.java @@ -193,7 +193,7 @@ static ClusterState createClusterState(TrainedModelAssignmentMetadata trainedMod // TODO PersistentTasksCustomMetadata is required for jobs other than TrainedModels // .customs(Map.of(PersistentTasksCustomMetadata.TYPE, PersistentTasksCustomMetadata.builder().build())) if (trainedModelAssignmentMetadata != null) { - metadataBuilder.putCustom(TrainedModelAssignmentMetadata.NAME, trainedModelAssignmentMetadata); + metadataBuilder.putSection(TrainedModelAssignmentMetadata.NAME, trainedModelAssignmentMetadata); } Metadata metadata = metadataBuilder.build(); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java index 3674dda3934bd..5a108b369b057 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlAutoscalingResourceTrackerTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; -import 
org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MachineLearningField; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -313,7 +313,7 @@ public void testGetMemoryAndProcessorsScaleUpGivenAwaitingLazyAssignment() throw String jobId = "lazy-job"; MlAutoscalingContext mlAutoscalingContext = new MlAutoscalingContext( List.of( - new PersistentTasksCustomMetadata.PersistentTask<>( + new PersistentTasksMetadataSection.PersistentTask<>( MlTasks.jobTaskId(jobId), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(jobId), @@ -338,7 +338,7 @@ public void testGetMemoryAndProcessorsScaleUpGivenAwaitingLazyAssignment() throw .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) .build() ), - PersistentTasksCustomMetadata.builder().build() + PersistentTasksMetadataSection.builder().build() ); MlMemoryTracker mockTracker = mock(MlMemoryTracker.class); when(mockTracker.getAnomalyDetectorJobMemoryRequirement(jobId)).thenReturn(memory / 4); @@ -436,8 +436,8 @@ public void testGetMemoryAndProcessorsScaleUpGivenAwaitingLazyAssignmentButFaile String jobId = "lazy-job"; MlAutoscalingContext mlAutoscalingContext = new MlAutoscalingContext( List.of( - new PersistentTasksCustomMetadata.PersistentTask<>( - new PersistentTasksCustomMetadata.PersistentTask<>( + new PersistentTasksMetadataSection.PersistentTask<>( + new PersistentTasksMetadataSection.PersistentTask<>( MlTasks.jobTaskId(jobId), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(jobId), @@ -464,7 +464,7 @@ public void testGetMemoryAndProcessorsScaleUpGivenAwaitingLazyAssignmentButFaile .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) .build() ), - PersistentTasksCustomMetadata.builder().build() + PersistentTasksMetadataSection.builder().build() ); MlMemoryTracker mockTracker = mock(MlMemoryTracker.class); when(mockTracker.getAnomalyDetectorJobMemoryRequirement(jobId)).thenReturn(memory / 4); @@ -1365,7 +1365,7 @@ public void testGetMemoryAndProcessorsScaleDown() throws InterruptedException { .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) .build() ), - PersistentTasksCustomMetadata.builder().build() + PersistentTasksMetadataSection.builder().build() ); MlMemoryTracker mockTracker = mock(MlMemoryTracker.class); @@ -1471,7 +1471,7 @@ public void testGetMemoryAndProcessorsScaleDownPreventedByMinNodes() throws Inte .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) .build() ), - PersistentTasksCustomMetadata.builder().build() + PersistentTasksMetadataSection.builder().build() ); MlMemoryTracker mockTracker = mock(MlMemoryTracker.class); @@ -1564,7 +1564,7 @@ public void testGetMemoryAndProcessorsScaleDownPreventedByDummyEntityMemory() th .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) .build() ), - PersistentTasksCustomMetadata.builder().build() + PersistentTasksMetadataSection.builder().build() ); MlMemoryTracker mockTracker = mock(MlMemoryTracker.class); @@ -1668,7 +1668,7 @@ public void testGetMemoryAndProcessorsScaleDownNotPreventedByDummyEntityProcesso .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) .build() ), - PersistentTasksCustomMetadata.builder().build() + PersistentTasksMetadataSection.builder().build() ); MlMemoryTracker mockTracker = mock(MlMemoryTracker.class); @@ -1766,7 +1766,7 @@ public void testGetMemoryAndProcessorsScaleDownNotPreventedByDummyEntityAsMemory .roles(Set.of(DiscoveryNodeRole.ML_ROLE)) .build() ), - PersistentTasksCustomMetadata.builder().build() + PersistentTasksMetadataSection.builder().build() ); 
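For reference, the test-fixture pattern that these hunks migrate can be condensed as follows. This is a minimal sketch, not part of the patch: the import paths for the x-pack ML classes and the placeholder job/node ids are assumptions, while every API call (builder(), addTask(), Assignment, putSection(), section()) is taken from the surrounding hunks.

// Minimal sketch of the renamed test-setup pattern; illustrative only, not part of the patch.
// Assumed: the import paths below and the placeholder ids passed in by the caller.
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.persistent.PersistentTasksMetadataSection;
import org.elasticsearch.xpack.core.ml.MlTasks;
import org.elasticsearch.xpack.core.ml.action.OpenJobAction;

class PersistentTasksSectionSketch {

    // Build a cluster state carrying one ML job task, using the renamed section API.
    static ClusterState clusterStateWithJobTask(String jobId, String nodeId) {
        // was: PersistentTasksCustomMetadata.builder()
        PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder();
        tasksBuilder.addTask(
            MlTasks.jobTaskId(jobId),
            MlTasks.JOB_TASK_NAME,
            new OpenJobAction.JobParams(jobId),
            // was: PersistentTasksCustomMetadata.Assignment
            new PersistentTasksMetadataSection.Assignment(nodeId, "test assignment")
        );
        PersistentTasksMetadataSection tasks = tasksBuilder.build();
        return ClusterState.builder(new ClusterName("_name"))
            // was: Metadata.Builder#putCustom
            .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasks))
            .build();
    }

    // Read the section back out of the cluster state, mirroring the write above.
    static PersistentTasksMetadataSection readBack(ClusterState state) {
        // was: Metadata#custom
        return state.metadata().section(PersistentTasksMetadataSection.TYPE);
    }
}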
MlMemoryTracker mockTracker = mock(MlMemoryTracker.class); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java index 970044c188849..528b5fd6ef8e5 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlMemoryAutoscalingDeciderTests.java @@ -21,7 +21,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.snapshots.SnapshotShardSizeInfo; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.autoscaling.capacity.AutoscalingCapacity; @@ -1274,11 +1274,11 @@ public void testScale_WithNoMlNodesButWaitingAnalytics() { final String analyticsId = "waiting-analytics"; - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addAnalyticsTask(analyticsId, null, DataFrameAnalyticsState.STARTING, tasksBuilder); ClusterState.Builder clusterStateBuilder = ClusterState.builder(new ClusterName("_name")); Metadata.Builder metadata = Metadata.builder(); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()); clusterStateBuilder.metadata(metadata); ClusterState clusterState = clusterStateBuilder.build(); @@ -1324,7 +1324,7 @@ private static ClusterState clusterState( for (DiscoveryNode node : nodeList) { nodesBuilder.add(node); } - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); for (String jobId : ongoingAnomalyTasks) { OpenJobPersistentTasksExecutorTests.addJobTask( jobId, @@ -1347,7 +1347,7 @@ private static ClusterState clusterState( MlTasks.datafeedTaskId(jobId + "-datafeed"), MlTasks.DATAFEED_TASK_NAME, dfParams, - new PersistentTasksCustomMetadata.Assignment(nodeAssignment, "test") + new PersistentTasksMetadataSection.Assignment(nodeAssignment, "test") ); } for (String analyticsId : analyticsTasks) { @@ -1370,11 +1370,11 @@ private static ClusterState clusterState( for (String job : waitingAnomalyTasks) { addJobTask(job, null, null, tasksBuilder); } - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); cs.nodes(nodesBuilder); Metadata.Builder metadata = Metadata.builder(); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks); cs.metadata(metadata); return cs.build(); } @@ -1403,13 +1403,13 @@ public static void addAnalyticsTask( String jobId, String nodeId, DataFrameAnalyticsState jobState, - PersistentTasksCustomMetadata.Builder builder + PersistentTasksMetadataSection.Builder builder ) { builder.addTask( MlTasks.dataFrameAnalyticsTaskId(jobId), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new 
StartDataFrameAnalyticsAction.TaskParams(jobId, MlConfigVersion.CURRENT, true), - nodeId == null ? AWAITING_LAZY_ASSIGNMENT : new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment") + nodeId == null ? AWAITING_LAZY_ASSIGNMENT : new PersistentTasksMetadataSection.Assignment(nodeId, "test assignment") ); if (jobState != null) { builder.updateTaskState( @@ -1419,12 +1419,12 @@ public static void addAnalyticsTask( } } - public static void addJobTask(String jobId, String nodeId, JobState jobState, PersistentTasksCustomMetadata.Builder builder) { + public static void addJobTask(String jobId, String nodeId, JobState jobState, PersistentTasksMetadataSection.Builder builder) { builder.addTask( MlTasks.jobTaskId(jobId), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(jobId), - nodeId == null ? AWAITING_LAZY_ASSIGNMENT : new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment") + nodeId == null ? AWAITING_LAZY_ASSIGNMENT : new PersistentTasksMetadataSection.Assignment(nodeId, "test assignment") ); if (jobState != null) { builder.updateTaskState( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java index ba40dc0bfdda7..bf6848b1466a0 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/autoscaling/MlProcessorAutoscalingDeciderTests.java @@ -62,7 +62,7 @@ public void testScale_GivenCurrentCapacityIsUsedExactly() { .nodes(DiscoveryNodes.builder().add(mlNode1).add(mlNode2).add(dataNode).build()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -138,7 +138,7 @@ public void testScale_GivenUnsatisfiedDeployments() { .nodes(DiscoveryNodes.builder().add(mlNode1).add(mlNode2).add(dataNode).build()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -214,7 +214,7 @@ public void testScale_GivenUnsatisfiedDeploymentIsLowPriority_ShouldNotScaleUp() .nodes(DiscoveryNodes.builder().add(mlNode1).add(mlNode2).add(dataNode).build()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -290,7 +290,7 @@ public void testScale_GivenMoreThanHalfProcessorsAreUsed() { .nodes(DiscoveryNodes.builder().add(mlNode1).add(mlNode2).add(dataNode).build()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -377,7 +377,7 @@ public void testScale_GivenDownScalePossible_DelayNotSatisfied() { .nodes(DiscoveryNodes.builder().add(mlNode1).add(mlNode2).add(dataNode).build()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -452,7 +452,7 @@ public void testScale_GivenDownScalePossible_DelaySatisfied() { .nodes(DiscoveryNodes.builder().add(mlNode1).add(mlNode2).add(dataNode).build()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() 
.addNewAssignment( @@ -531,7 +531,7 @@ public void testScale_GivenLowPriorityDeploymentsOnly() { .nodes(DiscoveryNodes.builder().add(mlNode1).add(mlNode2).add(dataNode).build()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java index 4bb612921876e..3ee550e57563f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedNodeSelectorTests.java @@ -32,7 +32,7 @@ import org.elasticsearch.index.IndexVersion; import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.TestIndexNameExpressionResolver; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlMetadata; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -65,7 +65,7 @@ public class DatafeedNodeSelectorTests extends ESTestCase { private IndexNameExpressionResolver resolver; private DiscoveryNodes nodes; private ClusterState clusterState; - private PersistentTasksCustomMetadata tasks; + private PersistentTasksMetadataSection tasks; private MlMetadata mlMetadata; @Before @@ -89,13 +89,13 @@ public void testSelectNode_GivenJobIsOpened() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -112,13 +112,13 @@ public void testSelectNode_GivenJobIsOpenedAndDataStream() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); givenClusterStateWithDatastream("foo", 1, 0, Collections.singletonList(new Tuple<>(0, ShardRoutingState.STARTED))); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -135,13 +135,13 @@ public void testSelectNode_GivenJobIsOpening() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = 
PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", null, tasksBuilder); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -160,11 +160,11 @@ public void testNoJobTask() { // Using wildcard index name to test for index resolving as well DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")); - tasks = PersistentTasksCustomMetadata.builder().build(); + tasks = PersistentTasksMetadataSection.builder().build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -204,14 +204,14 @@ public void testSelectNode_GivenJobFailedOrClosed() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); JobState jobState = randomFrom(JobState.FAILED, JobState.CLOSED); addJobTask(job.getId(), "node_id", jobState, tasksBuilder); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -253,7 +253,7 @@ public void testShardUnassigned() { // Using wildcard index name to test for index resolving as well DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); @@ -262,7 +262,7 @@ public void testShardUnassigned() { givenClusterState("foo", 1, 0, states); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -286,7 +286,7 @@ public void testShardNotAllActive() { // Using wildcard index name to test for index resolving as well DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("fo*")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); @@ -296,7 +296,7 @@ public void testShardNotAllActive() { givenClusterState("foo", 2, 0, states); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -318,13 +318,13 @@ public void testIndexDoesntExist() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")); - 
PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -394,13 +394,13 @@ public void testIndexPatternDoesntExist() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Arrays.asList("missing-*", "foo*")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -417,13 +417,13 @@ public void testLocalIndexPatternWithoutMatchingIndicesAndRemoteIndexPattern() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Arrays.asList("missing-*", "remote:index-*")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -440,13 +440,13 @@ public void testRemoteIndex() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("remote:foo")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -462,7 +462,7 @@ public void testSelectNode_jobTaskStale() { DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); String nodeId = randomBoolean() ? 
"node_id2" : null; - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), nodeId, JobState.OPENED, tasksBuilder); // Set to lower allocationId, so job task is stale: tasksBuilder.updateTaskState(MlTasks.jobTaskId(job.getId()), new JobTaskState(JobState.OPENED, 0, null, Instant.now())); @@ -472,7 +472,7 @@ public void testSelectNode_jobTaskStale() { Collection candidateNodes = makeCandidateNodes("node_id1", "node_id2", "node_id3"); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -502,7 +502,7 @@ public void testSelectNode_jobTaskStale() { ) ); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id1", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); @@ -526,7 +526,7 @@ public void testSelectNode_GivenJobOpeningAndIndexDoesNotExist() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("not_foo")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENING, tasksBuilder); tasks = tasksBuilder.build(); @@ -571,14 +571,14 @@ public void testSelectNode_GivenMlUpgradeMode() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); mlMetadata = new MlMetadata.Builder().isUpgradeMode(true).build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -593,14 +593,14 @@ public void testSelectNode_GivenResetInProgress() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); mlMetadata = new MlMetadata.Builder().isResetMode(true).build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -615,7 +615,7 @@ public void testCheckDatafeedTaskCanBeCreated_GivenMlUpgradeMode() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); - PersistentTasksCustomMetadata.Builder 
tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); mlMetadata = new MlMetadata.Builder().isUpgradeMode(true).build(); @@ -640,13 +640,13 @@ public void testSelectNode_GivenJobIsOpenedAndNodeIsShuttingDown() { Job job = createScheduledJob("job_id").build(new Date()); DatafeedConfig df = createDatafeed("datafeed_id", job.getId(), Collections.singletonList("foo")); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); tasks = tasksBuilder.build(); givenClusterState("foo", 1, 0); - PersistentTasksCustomMetadata.Assignment result = new DatafeedNodeSelector( + PersistentTasksMetadataSection.Assignment result = new DatafeedNodeSelector( clusterState, resolver, df.getId(), @@ -681,8 +681,8 @@ private void givenClusterState(String index, int numberOfShards, int numberOfRep clusterState = ClusterState.builder(new ClusterName("cluster_name")) .metadata( - new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasks) - .putCustom(MlMetadata.TYPE, mlMetadata) + new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasks) + .putSection(MlMetadata.TYPE, mlMetadata) .put(indexMetadata, false) ) .nodes(nodes) @@ -706,8 +706,8 @@ private void givenClusterStateWithDatastream( clusterState = ClusterState.builder(new ClusterName("cluster_name")) .metadata( new Metadata.Builder().put(DataStreamTestHelper.newInstance(dataStreamName, Collections.singletonList(index))) - .putCustom(PersistentTasksCustomMetadata.TYPE, tasks) - .putCustom(MlMetadata.TYPE, mlMetadata) + .putSection(PersistentTasksMetadataSection.TYPE, tasks) + .putSection(MlMetadata.TYPE, mlMetadata) .put(indexMetadata, false) ) .nodes(nodes) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunnerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunnerTests.java index cba97835ee2f4..8c5eb55cc32af 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunnerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/datafeed/DatafeedRunnerTests.java @@ -21,8 +21,8 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.core.TimeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.Scheduler; import org.elasticsearch.threadpool.ThreadPool; @@ -90,9 +90,9 @@ public class DatafeedRunnerTests extends ESTestCase { public void setUpTests() { Job.Builder job = createDatafeedJob().setCreateTime(new Date()); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(job.getId(), "node_id", JobState.OPENED, tasksBuilder); - 
PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); DiscoveryNodes nodes = DiscoveryNodes.builder() .add( DiscoveryNodeUtils.create( @@ -105,7 +105,7 @@ public void setUpTests() { ) .build(); ClusterState.Builder cs = ClusterState.builder(new ClusterName("cluster_name")) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasks)) + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasks)) .nodes(nodes); clusterService = mock(ClusterService.class); @@ -274,10 +274,10 @@ public void testStart_GivenNewlyCreatedJobLookBackAndRealtime() throws Exception } public void testDatafeedTaskWaitsUntilJobIsOpened() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENING, tasksBuilder); ClusterState cs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(cs); @@ -288,11 +288,11 @@ public void testDatafeedTaskWaitsUntilJobIsOpened() { // Verify datafeed has not started running yet as job is still opening verify(threadPool, never()).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENING, tasksBuilder); addJobTask("another_job", "node_id", JobState.OPENED, tasksBuilder); ClusterState anotherJobCs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", anotherJobCs, cs)); @@ -300,10 +300,10 @@ public void testDatafeedTaskWaitsUntilJobIsOpened() { // Still no run verify(threadPool, never()).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder jobOpenedCs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())); + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", jobOpenedCs.build(), anotherJobCs)); @@ -315,10 +315,10 @@ public void testDatafeedTaskWaitsUntilAutodetectCommunicatorIsOpen() { hasOpenAutodetectCommunicator.set(false); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENED, tasksBuilder); ClusterState cs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(new 
Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(cs); @@ -329,11 +329,11 @@ public void testDatafeedTaskWaitsUntilAutodetectCommunicatorIsOpen() { // Verify datafeed has not started running yet as job doesn't have an open autodetect communicator verify(threadPool, never()).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENED, tasksBuilder); addJobTask("another_job", "node_id", JobState.OPENED, tasksBuilder); ClusterState anotherJobCs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", anotherJobCs, cs)); @@ -350,10 +350,10 @@ public void testDatafeedTaskWaitsUntilAutodetectCommunicatorIsOpen() { } public void testDatafeedTaskWaitsUntilJobIsNotStale() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENED, tasksBuilder, true); ClusterState cs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(cs); @@ -364,11 +364,11 @@ public void testDatafeedTaskWaitsUntilJobIsNotStale() { // Verify datafeed has not started running yet as job is stale (i.e. 
even though opened it is part way through relocating) verify(threadPool, never()).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENED, tasksBuilder, true); addJobTask("another_job", "node_id", JobState.OPENED, tasksBuilder); ClusterState anotherJobCs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", anotherJobCs, cs)); @@ -376,10 +376,10 @@ public void testDatafeedTaskWaitsUntilJobIsNotStale() { // Still no run verify(threadPool, never()).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder jobOpenedCs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())); + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", jobOpenedCs.build(), anotherJobCs)); @@ -388,10 +388,10 @@ public void testDatafeedTaskWaitsUntilJobIsNotStale() { } public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENING, tasksBuilder); ClusterState cs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(cs); @@ -402,10 +402,10 @@ public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() { // Verify datafeed has not started running yet as job is still opening verify(threadPool, never()).executor(MachineLearning.DATAFEED_THREAD_POOL_NAME); - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.FAILED, tasksBuilder); ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())); + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", updatedCs.build(), cs)); @@ -415,10 +415,10 @@ public void testDatafeedTaskStopsBecauseJobFailedWhileOpening() { } public void testDatafeedGetsStoppedWhileWaitingForJobToOpen() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENING, tasksBuilder); ClusterState cs = 
ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(cs); @@ -433,10 +433,10 @@ public void testDatafeedGetsStoppedWhileWaitingForJobToOpen() { datafeedRunner.stopDatafeed(task, "test", StopDatafeedAction.DEFAULT_TIMEOUT); // Update job state to opened - tasksBuilder = PersistentTasksCustomMetadata.builder(); + tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENED, tasksBuilder); ClusterState.Builder updatedCs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())); + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())); capturedClusterStateListener.getValue().clusterChanged(new ClusterChangedEvent("_source", cs, updatedCs.build())); @@ -445,10 +445,10 @@ public void testDatafeedGetsStoppedWhileWaitingForJobToOpen() { } public void testDatafeedGetsStoppedWhileStarting() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(JOB_ID, "node_id", JobState.OPENED, tasksBuilder); ClusterState cs = ClusterState.builder(clusterService.state()) - .metadata(new Metadata.Builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(new Metadata.Builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(cs); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsServiceTests.java index a95c8a92d93ee..4ad54c1977070 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/TrainedModelStatsServiceTests.java @@ -211,7 +211,7 @@ public void testUpdateStatsUpgradeMode() { // now set the upgrade mode Metadata.Builder metadata = Metadata.builder() .put(indexMetadata) - .putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(true).build()); + .putSection(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(true).build()); ClusterState clusterState = ClusterState.builder(new ClusterName("upgrade-mode-test-upgrade-enabled")) .routingTable(routingTable) @@ -242,7 +242,7 @@ public void testUpdateStatsUpgradeMode() { Metadata.Builder metadata = Metadata.builder() .put(indexMetadata) - .putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(false).build()); + .putSection(MlMetadata.TYPE, new MlMetadata.Builder().isUpgradeMode(false).build()); ClusterState clusterState = ClusterState.builder(new ClusterName("upgrade-mode-test-upgrade-disabled")) .routingTable(routingTable) @@ -300,7 +300,7 @@ public void testUpdateStatsResetMode() { // now set the upgrade mode Metadata.Builder metadata = Metadata.builder() .put(indexMetadata) - .putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(true).build()); + .putSection(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(true).build()); ClusterState clusterState = 
ClusterState.builder(new ClusterName("upgrade-mode-test-upgrade-enabled")) .routingTable(routingTable) @@ -331,7 +331,7 @@ public void testUpdateStatsResetMode() { Metadata.Builder metadata = Metadata.builder() .put(indexMetadata) - .putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(false).build()); + .putSection(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(false).build()); ClusterState clusterState = ClusterState.builder(new ClusterName("upgrade-mode-test-upgrade-disabled")) .routingTable(routingTable) diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerServiceTests.java index 4aaddc91231f3..6778a29e1ec11 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/adaptiveallocations/AdaptiveAllocationsScalerServiceTests.java @@ -82,7 +82,7 @@ private ClusterState getClusterState(int numAllocations) { ClusterState clusterState = mock(ClusterState.class); Metadata metadata = mock(Metadata.class); when(clusterState.getMetadata()).thenReturn(metadata); - when(metadata.custom("trained_model_assignment")).thenReturn( + when(metadata.section("trained_model_assignment")).thenReturn( new TrainedModelAssignmentMetadata( Map.of( "test-deployment", diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java index 1dc44582492aa..b3f82e63c4687 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentClusterServiceTests.java @@ -40,7 +40,7 @@ import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.Strings; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -273,7 +273,7 @@ public void testUpdateModelRoutingTable() { .putCompatibilityVersions(nodeId, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -387,7 +387,7 @@ public void testRemoveAssignment() { .putCompatibilityVersions("test-node", CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -422,7 +422,7 @@ public void testRemoveAllAssignments() { .putCompatibilityVersions("test-node", CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadataTests.randomInstance()) + .putSection(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadataTests.randomInstance()) .build() ) .build(); @@ 
-450,7 +450,7 @@ public void testCreateAssignment_GivenModelCannotByFullyAllocated_AndScalingIsPo ClusterState currentState = ClusterState.builder(new ClusterName("testCreateAssignment")) .nodes(discoveryNodes) - .metadata(Metadata.builder().putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata("ml-node-shutting-down"))) + .metadata(Metadata.builder().putSection(NodesShutdownMetadata.TYPE, shutdownMetadata("ml-node-shutting-down"))) .build(); TrainedModelAssignmentClusterService trainedModelAssignmentClusterService = createClusterService(5); @@ -499,7 +499,7 @@ public void testCreateAssignment_GivenModelCannotByFullyAllocated_AndScalingIsNo ); ClusterState currentState = ClusterState.builder(new ClusterName("testCreateAssignment")) .nodes(discoveryNodes) - .metadata(Metadata.builder().putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata("ml-node-shutting-down"))) + .metadata(Metadata.builder().putSection(NodesShutdownMetadata.TYPE, shutdownMetadata("ml-node-shutting-down"))) .build(); TrainedModelAssignmentClusterService trainedModelAssignmentClusterService = createClusterService(0); @@ -534,7 +534,7 @@ public void testCreateAssignmentWhileResetModeIsTrue() throws InterruptedExcepti ClusterState currentState = ClusterState.builder(new ClusterName("testCreateAssignment")) .nodes(discoveryNodes) .putCompatibilityVersions("ml-node-with-room", CompatibilityVersionsUtils.staticCurrent()) - .metadata(Metadata.builder().putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(true).build())) + .metadata(Metadata.builder().putSection(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(true).build())) .build(); when(clusterService.state()).thenReturn(currentState); TrainedModelAssignmentClusterService trainedModelAssignmentClusterService = createClusterService(0); @@ -568,7 +568,7 @@ public void testHaveMlNodesChanged_ReturnsFalseWhenPreviouslyShuttingDownNode_Is ClusterState stateWithShuttingDownNodeAndMlNode1 = createClusterState( List.of(shuttingDownNode, mlNode1), Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -578,7 +578,7 @@ public void testHaveMlNodesChanged_ReturnsFalseWhenPreviouslyShuttingDownNode_Is ) .build() ) - .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(shuttingDownNode)) + .putSection(NodesShutdownMetadata.TYPE, shutdownMetadata(shuttingDownNode)) .build() ); @@ -586,7 +586,7 @@ public void testHaveMlNodesChanged_ReturnsFalseWhenPreviouslyShuttingDownNode_Is .nodes(DiscoveryNodes.builder(stateWithShuttingDownNodeAndMlNode1.nodes()).remove(shuttingDownNode).build()) .metadata( Metadata.builder(stateWithShuttingDownNodeAndMlNode1.metadata()) - .putCustom(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY) + .putSection(NodesShutdownMetadata.TYPE, NodesShutdownMetadata.EMPTY) .build() ) .build(); @@ -605,7 +605,7 @@ public void testHaveMlNodesChanged_ReturnsTrueWhenNodeShutsDownAndWasRoutedTo() ClusterState stateWithOneNode = createClusterState( List.of(mlNode1), Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -615,14 +615,14 @@ public void testHaveMlNodesChanged_ReturnsTrueWhenNodeShutsDownAndWasRoutedTo() ) .build() ) - .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(mlNode1)) + .putSection(NodesShutdownMetadata.TYPE, shutdownMetadata(mlNode1)) .build() ); ClusterState stateWithTwoNodes = createClusterState( List.of(mlNode1, mlNode2), 
Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -649,7 +649,7 @@ public void testHaveMlNodesChanged_ReturnsFalseWhenNodeShutsDownAndWasRoutedTo_B ClusterState stateWithOneNode = createClusterState( List.of(mlNode1), Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -659,14 +659,14 @@ public void testHaveMlNodesChanged_ReturnsFalseWhenNodeShutsDownAndWasRoutedTo_B ) .build() ) - .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(mlNode1)) + .putSection(NodesShutdownMetadata.TYPE, shutdownMetadata(mlNode1)) .build() ); ClusterState stateWithTwoNodes = createClusterState( List.of(mlNode1, mlNode2), Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -710,7 +710,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(randomFrom(stateWithOneNodeNotMl, stateWithOneNode, stateWithTwoNodes)) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) @@ -733,14 +733,14 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(randomState) .metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadataTests.randomInstance()) + .putSection(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadataTests.randomInstance()) .build() ) .build(), ClusterState.builder(randomState) .metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadataTests.randomInstance()) + .putSection(TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadataTests.randomInstance()) .build() ) .build() @@ -757,7 +757,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithOneNode) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) @@ -769,7 +769,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithOneNodeNotMl) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) @@ -791,7 +791,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithOneNode) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) @@ -803,7 +803,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithTwoNodes) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) @@ -825,7 +825,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithTwoNodes) .metadata( 
Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) @@ -837,7 +837,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithOneNode) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) @@ -859,7 +859,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithTwoNodes) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -874,7 +874,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithOneNode) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) @@ -896,20 +896,20 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithTwoNodes) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) .build() ) - .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(mlNode2)) + .putSection(NodesShutdownMetadata.TYPE, shutdownMetadata(mlNode2)) .build() ) .build(), ClusterState.builder(stateWithOneNode) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(model1, TrainedModelAssignment.Builder.empty(newParams(model1, 100), null)) @@ -931,7 +931,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithOneNode) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -953,7 +953,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithTwoNodes) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -985,7 +985,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithOneNode) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -1008,7 +1008,7 @@ public void testDetectReasonToRebalanceModels() { ClusterState.builder(stateWithTwoNodes) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -1052,7 +1052,7 @@ public void testDetectReasonToRebalanceModels_WithNodeShutdowns() { .build(); ClusterState fullyAllocated = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata( - Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).build() + Metadata.builder().putSection(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).build() ).build(); // reallocate when the node is marked for shutdown @@ 
-1060,8 +1060,8 @@ public void testDetectReasonToRebalanceModels_WithNodeShutdowns() { var currentState = ClusterState.builder(fullyAllocated) .metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) - .putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)) + .putSection(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) + .putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)) .build() ) .build(); @@ -1079,8 +1079,8 @@ public void testDetectReasonToRebalanceModels_WithNodeShutdowns() { // reallocated on the node shutdown change currentState = csBuilderWithNodes(clusterName, mlNode2, esNode1, esNode2, esNode3).metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) - .putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)) + .putSection(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) + .putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)) .build() ).build(); @@ -1096,8 +1096,8 @@ public void testDetectReasonToRebalanceModels_WithNodeShutdowns() { // mlNode1 has returned but is still marked as shutdown currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) - .putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)) + .putSection(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) + .putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)) .build() ).build(); @@ -1112,7 +1112,7 @@ public void testDetectReasonToRebalanceModels_WithNodeShutdowns() { // mlNode1 no longer marked for shutdown currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata( - Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).build() + Metadata.builder().putSection(TrainedModelAssignmentMetadata.NAME, fullModelAllocation).build() ).build(); assertThat( @@ -1127,8 +1127,8 @@ public void testDetectReasonToRebalanceModels_WithNodeShutdowns() { // now an ES node is marked for shutdown currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) - .putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)) + .putSection(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) + .putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)) .build() ).build(); @@ -1144,8 +1144,8 @@ public void testDetectReasonToRebalanceModels_WithNodeShutdowns() { // The ES node is removed currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode2, esNode3).metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) - .putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)) + .putSection(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) + .putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)) .build() ).build(); @@ -1161,8 +1161,8 @@ public void testDetectReasonToRebalanceModels_WithNodeShutdowns() { // The ES node returns currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) - .putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)) + 
.putSection(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) + .putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)) .build() ).build(); @@ -1178,8 +1178,8 @@ public void testDetectReasonToRebalanceModels_WithNodeShutdowns() { // The ES node is no longer marked as shutdown currentState = csBuilderWithNodes(clusterName, mlNode1, mlNode2, esNode1, esNode2, esNode3).metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) - .putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)) + .putSection(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) + .putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(esNode1)) .build() ).build(); @@ -1194,8 +1194,8 @@ public void testDetectReasonToRebalanceModels_WithNodeShutdowns() { previousState = fullyAllocated; currentState = csBuilderWithNodes(clusterName, mlNode2, esNode1, esNode2, esNode3).metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) - .putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)) + .putSection(TrainedModelAssignmentMetadata.NAME, fullModelAllocation) + .putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata(mlNode1)) .build() ).build(); @@ -1224,7 +1224,7 @@ public void testDetectReasonToRebalanceModels_GivenSingleMlJobStopped() { String mlNodeId = "ml-node-1"; DiscoveryNode mlNode = buildNode(mlNodeId, true, ByteSizeValue.ofGb(4).getBytes(), 8); - PersistentTasksCustomMetadata.Builder tasksWithJobBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksWithJobBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask( "anomaly-detection-job", mlNodeId, @@ -1236,8 +1236,8 @@ public void testDetectReasonToRebalanceModels_GivenSingleMlJobStopped() { .nodes(DiscoveryNodes.builder().add(mlNode)) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, tasksWithJobBuilder.build()) - .putCustom( + .putSection(PersistentTasksMetadataSection.TYPE, tasksWithJobBuilder.build()) + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) @@ -1251,8 +1251,8 @@ public void testDetectReasonToRebalanceModels_GivenSingleMlJobStopped() { .nodes(DiscoveryNodes.builder().add(mlNode)) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, PersistentTasksCustomMetadata.builder().build()) - .putCustom( + .putSection(PersistentTasksMetadataSection.TYPE, PersistentTasksMetadataSection.builder().build()) + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) @@ -1285,13 +1285,13 @@ public void testDetectReasonToRebalanceModels_GivenOutdatedAssignments() { ClusterState previousState = ClusterState.builder(new ClusterName("test_cluster")) .nodes(DiscoveryNodes.builder().add(mlNode)) - .metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, modelMetadata).build()) + .metadata(Metadata.builder().putSection(TrainedModelAssignmentMetadata.NAME, modelMetadata).build()) .build(); // A non ML-node is added ClusterState currentState = ClusterState.builder(new ClusterName("test_cluster")) .nodes(DiscoveryNodes.builder().add(mlNode).add(buildNode("non-ml-node", false, 
ByteSizeValue.ofGb(4).getBytes(), 8))) - .metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, modelMetadata).build()) + .metadata(Metadata.builder().putSection(TrainedModelAssignmentMetadata.NAME, modelMetadata).build()) .build(); assertThat( @@ -1307,7 +1307,7 @@ public void testDetectReasonToRebalanceModels_GivenMultipleMlJobsStopped() { String mlNodeId = "ml-node-1"; DiscoveryNode mlNode = buildNode(mlNodeId, true, ByteSizeValue.ofGb(4).getBytes(), 8); - PersistentTasksCustomMetadata.Builder previousTasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder previousTasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask( "anomaly-detection-job1", mlNodeId, @@ -1330,10 +1330,10 @@ public void testDetectReasonToRebalanceModels_GivenMultipleMlJobsStopped() { MlTasks.dataFrameAnalyticsTaskId("dfa-1"), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new StartDataFrameAnalyticsAction.TaskParams("dfa-1", MlConfigVersion.CURRENT, true), - new PersistentTasksCustomMetadata.Assignment(mlNodeId, "test assignment") + new PersistentTasksMetadataSection.Assignment(mlNodeId, "test assignment") ); - PersistentTasksCustomMetadata.Builder currentTasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder currentTasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask( "anomaly-detection-job2", mlNodeId, @@ -1351,8 +1351,8 @@ public void testDetectReasonToRebalanceModels_GivenMultipleMlJobsStopped() { .nodes(DiscoveryNodes.builder().add(mlNode)) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, previousTasksBuilder.build()) - .putCustom( + .putSection(PersistentTasksMetadataSection.TYPE, previousTasksBuilder.build()) + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) @@ -1366,8 +1366,8 @@ public void testDetectReasonToRebalanceModels_GivenMultipleMlJobsStopped() { .nodes(DiscoveryNodes.builder().add(mlNode)) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, currentTasksBuilder.build()) - .putCustom( + .putSection(PersistentTasksMetadataSection.TYPE, currentTasksBuilder.build()) + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) @@ -1390,7 +1390,7 @@ public void testDetectReasonToRebalanceModels_GivenMlJobsStarted() { String mlNodeId = "ml-node-1"; DiscoveryNode mlNode = buildNode(mlNodeId, true, ByteSizeValue.ofGb(4).getBytes(), 8); - PersistentTasksCustomMetadata.Builder previousTasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder previousTasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask( "anomaly-detection-job1", mlNodeId, @@ -1401,10 +1401,10 @@ public void testDetectReasonToRebalanceModels_GivenMlJobsStarted() { MlTasks.dataFrameAnalyticsTaskId("dfa-1"), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new StartDataFrameAnalyticsAction.TaskParams("dfa-1", MlConfigVersion.CURRENT, true), - new PersistentTasksCustomMetadata.Assignment(mlNodeId, "test assignment") + new PersistentTasksMetadataSection.Assignment(mlNodeId, "test assignment") ); - PersistentTasksCustomMetadata.Builder 
currentTasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder currentTasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask( "anomaly-detection-job1", mlNodeId, @@ -1421,15 +1421,15 @@ public void testDetectReasonToRebalanceModels_GivenMlJobsStarted() { MlTasks.dataFrameAnalyticsTaskId("dfa-1"), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new StartDataFrameAnalyticsAction.TaskParams("dfa-1", MlConfigVersion.CURRENT, true), - new PersistentTasksCustomMetadata.Assignment(mlNodeId, "test assignment") + new PersistentTasksMetadataSection.Assignment(mlNodeId, "test assignment") ); ClusterState previousState = ClusterState.builder(new ClusterName("test_cluster")) .nodes(DiscoveryNodes.builder().add(mlNode)) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, previousTasksBuilder.build()) - .putCustom( + .putSection(PersistentTasksMetadataSection.TYPE, previousTasksBuilder.build()) + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) @@ -1443,8 +1443,8 @@ public void testDetectReasonToRebalanceModels_GivenMlJobsStarted() { .nodes(DiscoveryNodes.builder().add(mlNode)) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, currentTasksBuilder.build()) - .putCustom( + .putSection(PersistentTasksMetadataSection.TYPE, currentTasksBuilder.build()) + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment(modelId, TrainedModelAssignment.Builder.empty(newParams(modelId, 100), null)) @@ -1467,7 +1467,7 @@ public void testAreAssignedNodesRemoved_GivenRemovedNodeThatIsRouted() { String nodeId1 = "node-1"; String nodeId2 = "node-2"; Metadata metadata = Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -1499,7 +1499,7 @@ public void testAreAssignedNodesRemoved_GivenRemovedNodeThatIsNotRouted() { String nodeId1 = "node-1"; String nodeId2 = "node-2"; Metadata metadata = Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -1541,14 +1541,14 @@ public void testAreAssignedNodesRemoved_GivenShuttingDownNodeThatIsRouted() { DiscoveryNode node2 = buildNode(nodeId2, true, ByteSizeValue.ofGb(4).getBytes(), 8); ClusterState previousState = ClusterState.builder(new ClusterName("testAreAssignedNodesRemoved")) .nodes(DiscoveryNodes.builder().add(node1).add(node2).build()) - .metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, trainedModelAssignmentMetadata)) + .metadata(Metadata.builder().putSection(TrainedModelAssignmentMetadata.NAME, trainedModelAssignmentMetadata)) .build(); ClusterState currentState = ClusterState.builder(new ClusterName("testAreAssignedNodesRemoved")) .nodes(DiscoveryNodes.builder().add(node1).add(node2).build()) .metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, trainedModelAssignmentMetadata) - .putCustom( + .putSection(TrainedModelAssignmentMetadata.NAME, trainedModelAssignmentMetadata) + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Map.of( @@ -1584,14 +1584,14 @@ public void testAreAssignedNodesRemoved_GivenShuttingDownNodeThatIsNotRouted() { DiscoveryNode node2 = buildNode(nodeId2, true, 
ByteSizeValue.ofGb(4).getBytes(), 8); ClusterState previousState = ClusterState.builder(new ClusterName("testAreAssignedNodesRemoved")) .nodes(DiscoveryNodes.builder().add(node1).add(node2).build()) - .metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, trainedModelAssignmentMetadata)) + .metadata(Metadata.builder().putSection(TrainedModelAssignmentMetadata.NAME, trainedModelAssignmentMetadata)) .build(); ClusterState currentState = ClusterState.builder(new ClusterName("testAreAssignedNodesRemoved")) .nodes(DiscoveryNodes.builder().add(node1).add(node2).build()) .metadata( Metadata.builder() - .putCustom(TrainedModelAssignmentMetadata.NAME, trainedModelAssignmentMetadata) - .putCustom( + .putSection(TrainedModelAssignmentMetadata.NAME, trainedModelAssignmentMetadata) + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Map.of( @@ -1619,7 +1619,7 @@ public void testRemoveRoutingToUnassignableNodes_RemovesRouteForRemovedNodes() { String nodeId2 = "node-2"; String nodeId3 = "node-3"; Metadata metadata = Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -1637,7 +1637,7 @@ public void testRemoveRoutingToUnassignableNodes_RemovesRouteForRemovedNodes() { .build() ) // This node should not affect the assignments because it is not routed to - .putCustom( + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Map.of( @@ -1676,7 +1676,7 @@ public void testRemoveRoutingToUnassignableNodes_AddsAStoppingRouteForShuttingDo String nodeId2 = "node-2"; String nodeId3 = "node-3"; Metadata metadata = Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -1695,7 +1695,7 @@ public void testRemoveRoutingToUnassignableNodes_AddsAStoppingRouteForShuttingDo ) .build() ) - .putCustom( + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Map.of( @@ -1736,7 +1736,7 @@ public void testRemoveRoutingToUnassignableNodes_IgnoresARouteThatIsStoppedForSh String nodeId2 = "node-2"; String nodeId3 = "node-3"; Metadata metadata = Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -1755,7 +1755,7 @@ public void testRemoveRoutingToUnassignableNodes_IgnoresARouteThatIsStoppedForSh ) .build() ) - .putCustom( + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Map.of( @@ -2016,7 +2016,7 @@ public void testSetAllocationToStopping() { .putCompatibilityVersions("test-node", CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -2067,7 +2067,7 @@ private void assertThatStoppingAssignmentPreventsMutation( } TrainedModelAssignmentMetadata metadataWithStopping = builder.build(); ClusterState originalWithStoppingAllocations = ClusterState.builder(original) - .metadata(Metadata.builder(original.metadata()).putCustom(TrainedModelAssignmentMetadata.NAME, metadataWithStopping).build()) + .metadata(Metadata.builder(original.metadata()).putSection(TrainedModelAssignmentMetadata.NAME, metadataWithStopping).build()) .build(); assertThat( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java 
b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java index 9fbc2b43f1137..2924db164516d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/assignment/TrainedModelAssignmentNodeServiceTests.java @@ -348,7 +348,7 @@ public void testClusterChangedWithResetMode() throws InterruptedException { .nodes(nodes) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -368,7 +368,7 @@ public void testClusterChangedWithResetMode() throws InterruptedException { ) .build() ) - .putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(true).build()) + .putSection(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(true).build()) .build() ) .build(), @@ -406,7 +406,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNode_CallsStop .nodes(nodes) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -416,7 +416,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNode_CallsStop ) .build() ) - .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(NODE_ID)) + .putSection(NodesShutdownMetadata.TYPE, shutdownMetadata(NODE_ID)) .build() ) .build(), @@ -459,7 +459,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNode_ButOtherA .nodes(nodes) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -470,7 +470,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNode_ButOtherA ) .build() ) - .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(NODE_ID)) + .putSection(NodesShutdownMetadata.TYPE, shutdownMetadata(NODE_ID)) .build() ) .build(), @@ -502,7 +502,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNodeButAlready .nodes(nodes) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -512,7 +512,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNodeButAlready ) .build() ) - .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(NODE_ID)) + .putSection(NodesShutdownMetadata.TYPE, shutdownMetadata(NODE_ID)) .build() ) .build(), @@ -543,7 +543,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNodeWithStarti .nodes(nodes) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -553,7 +553,7 @@ public void testClusterChanged_WhenAssigmentIsRoutedToShuttingDownNodeWithStarti ) .build() ) - .putCustom(NodesShutdownMetadata.TYPE, shutdownMetadata(NODE_ID)) + .putSection(NodesShutdownMetadata.TYPE, shutdownMetadata(NODE_ID)) .build() ) .build(), @@ -585,7 +585,7 @@ public void testClusterChanged_WhenAssigmentIsStopping_DoesNotAddModelToBeLoaded .nodes(nodes) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -596,7 +596,7 @@ public void 
testClusterChanged_WhenAssigmentIsStopping_DoesNotAddModelToBeLoaded ) .build() ) - .putCustom(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(false).build()) + .putSection(MlMetadata.TYPE, new MlMetadata.Builder().isResetMode(false).build()) .build() ) .build(), @@ -634,7 +634,7 @@ public void testClusterChanged() throws Exception { .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -692,7 +692,7 @@ public void testClusterChanged() throws Exception { .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -746,7 +746,7 @@ public void testClusterChanged() throws Exception { .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -788,7 +788,7 @@ public void testClusterChanged_GivenAllStartedAssignments_AndNonMatchingTargetAl .putCompatibilityVersions(NODE_ID, CompatibilityVersionsUtils.staticCurrent()) .metadata( Metadata.builder() - .putCustom( + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( @@ -851,7 +851,7 @@ private void givenAssignmentsInClusterStateForModels(List deploymentIds, } ClusterState currentState = ClusterState.builder(new ClusterName("testLoadQueuedModels")) - .metadata(Metadata.builder().putCustom(TrainedModelAssignmentMetadata.NAME, builder.build()).build()) + .metadata(Metadata.builder().putSection(TrainedModelAssignmentMetadata.NAME, builder.build()).build()) .build(); when(clusterService.state()).thenReturn(currentState); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorFactoryTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorFactoryTests.java index 9adbb3b3dd89a..a8109b172a9fb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorFactoryTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/ingest/InferenceProcessorFactoryTests.java @@ -881,7 +881,7 @@ private static ClusterState builderClusterStateWithModelReferences(MlConfigVersi IngestMetadata ingestMetadata = new IngestMetadata(configurations); return ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(IngestMetadata.TYPE, ingestMetadata)) + .metadata(Metadata.builder().putSection(IngestMetadata.TYPE, ingestMetadata)) .nodes( DiscoveryNodes.builder() .add( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java index bab292671c0bc..e2d6a176d2425 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/inference/loadingservice/ModelLoadingServiceTests.java @@ -810,14 +810,14 @@ private static Metadata.Builder addIngest(Metadata.Builder builder, 
String... mo configurations.put("pipeline_with_model_" + id, newConfigurationWithInferenceProcessor(id)); } IngestMetadata ingestMetadata = new IngestMetadata(configurations); - return builder.putCustom(IngestMetadata.TYPE, ingestMetadata); + return builder.putSection(IngestMetadata.TYPE, ingestMetadata); } private static Metadata.Builder addAliases(Metadata.Builder builder, List> modelIdAndAliases) { ModelAliasMetadata modelAliasMetadata = new ModelAliasMetadata( modelIdAndAliases.stream().collect(Collectors.toMap(Tuple::v2, t -> new ModelAliasMetadata.ModelAliasEntry(t.v1()))) ); - return builder.putCustom(ModelAliasMetadata.NAME, modelAliasMetadata); + return builder.putSection(ModelAliasMetadata.NAME, modelAliasMetadata); } private static PipelineConfiguration newConfigurationWithInferenceProcessor(String modelId) throws IOException { diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java index cb10eead972ec..ddfc060905b2a 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobManagerTests.java @@ -24,7 +24,7 @@ import org.elasticsearch.env.TestEnvironment; import org.elasticsearch.index.analysis.AnalysisRegistry; import org.elasticsearch.indices.TestIndexNameExpressionResolver; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.SearchModule; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -122,7 +122,7 @@ public void testGetJobNotInIndexOrCluster() { MlMetadata.Builder mlMetadata = new MlMetadata.Builder(); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(MlMetadata.TYPE, mlMetadata.build())) + .metadata(Metadata.builder().putSection(MlMetadata.TYPE, mlMetadata.build())) .build(); when(clusterService.state()).thenReturn(clusterState); @@ -182,13 +182,13 @@ public void testNotifyFilterChanged() throws IOException { Job.Builder jobWithoutFilter = buildJobBuilder("job-without-filter"); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask(jobReferencingFilter1.getId(), "node_id", JobState.OPENED, tasksBuilder); addJobTask(jobReferencingFilter2.getId(), "node_id", JobState.OPENED, tasksBuilder); addJobTask(jobWithoutFilter.getId(), "node_id", JobState.OPENED, tasksBuilder); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(clusterState); @@ -250,9 +250,9 @@ public void testNotifyFilterChangedGivenOnlyAddedItems() throws IOException { List docsAsBytes = Collections.singletonList(toBytesReference(jobReferencingFilter.build())); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - 
.metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(clusterState); @@ -286,9 +286,9 @@ public void testNotifyFilterChangedGivenOnlyRemovedItems() throws IOException { jobReferencingFilter.setAnalysisConfig(filterAnalysisConfig); List docsAsBytes = Collections.singletonList(toBytesReference(jobReferencingFilter.build())); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(clusterState); when(clusterService.state()).thenReturn(clusterState); @@ -311,13 +311,13 @@ public void testNotifyFilterChangedGivenOnlyRemovedItems() throws IOException { } public void testUpdateProcessOnCalendarChanged() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job-1", "node_id", JobState.OPENED, tasksBuilder); addJobTask("job-2", "node_id", JobState.OPENED, tasksBuilder); addJobTask("job-3", "node_id", JobState.OPENED, tasksBuilder); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(clusterState); @@ -345,13 +345,13 @@ public void testUpdateProcessOnCalendarChanged() { } public void testUpdateProcessOnCalendarChanged_GivenGroups() { - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); addJobTask("job-1", "node_id", JobState.OPENED, tasksBuilder); addJobTask("job-2", "node_id", JobState.OPENED, tasksBuilder); addJobTask("job-3", "node_id", JobState.OPENED, tasksBuilder); ClusterState clusterState = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build())) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build())) .build(); when(clusterService.state()).thenReturn(clusterState); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java index 112a8c80b0483..1c043c253c4eb 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/JobNodeSelectorTests.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.core.Tuple; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import 
org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlConfigVersion; import org.elasticsearch.xpack.core.ml.MlTasks; @@ -144,7 +144,7 @@ public void testSelectLeastLoadedMlNodeForAnomalyDetectorJob_maxCapacityCountLim 0, node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode( + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode( maxRunningJobsPerNode, 2, maxMachineMemoryPercent, @@ -192,7 +192,7 @@ public void testSelectLeastLoadedMlNodeForDataFrameAnalyticsJob_maxCapacityCount 0, node -> TransportStartDataFrameAnalyticsAction.TaskExecutor.nodeFilter(node, createTaskParams(dataFrameAnalyticsId)) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode( + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode( maxRunningJobsPerNode, 2, maxMachineMemoryPercent, @@ -246,7 +246,7 @@ public void testSelectLeastLoadedMlNodeForAnomalyDetectorJob_maxCapacityMemoryLi 0, node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode( + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode( maxRunningJobsPerNode, 2, maxMachineMemoryPercent, @@ -302,7 +302,7 @@ public void testSelectLeastLoadedMlNodeForDataFrameAnalyticsJob_givenTaskHasNull 0, node -> TransportStartDataFrameAnalyticsAction.TaskExecutor.nodeFilter(node, createTaskParams(dataFrameAnalyticsId)) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode( + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode( maxRunningJobsPerNode, 2, maxMachineMemoryPercent, @@ -341,7 +341,7 @@ public void testSelectLeastLoadedMlNodeForAnomalyDetectorJob_firstJobTooBigMemor 0, node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode( + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode( maxRunningJobsPerNode, 2, maxMachineMemoryPercent, @@ -400,7 +400,7 @@ public void testSelectLeastLoadedMlNodeForDataFrameAnalyticsJob_maxCapacityMemor 0, node -> TransportStartDataFrameAnalyticsAction.TaskExecutor.nodeFilter(node, createTaskParams(dataFrameAnalyticsId)) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode( + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode( maxRunningJobsPerNode, 2, maxMachineMemoryPercent, @@ -458,7 +458,7 @@ public void testSelectLeastLoadedMlNodeForDataFrameAnalyticsJob_firstJobTooBigMe 0, node -> TransportStartDataFrameAnalyticsAction.TaskExecutor.nodeFilter(node, createTaskParams(dataFrameAnalyticsId)) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode( + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode( maxRunningJobsPerNode, 2, maxMachineMemoryPercent, @@ -505,14 +505,14 @@ public void testSelectLeastLoadedMlNode_noMlNodes() { ) .build(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask("job_id1", "_node_id1", null, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); ClusterState.Builder cs = ClusterState.builder(new 
ClusterName("_name")); Metadata.Builder metadata = Metadata.builder(); cs.nodes(nodes); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks); cs.metadata(metadata); Job job = BaseMlIntegTestCase.createFareQuoteJob("job_id2", JOB_MEMORY_REQUIREMENT).build(new Date()); @@ -526,7 +526,7 @@ public void testSelectLeastLoadedMlNode_noMlNodes() { 0, node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode(20, 2, 30, MAX_JOB_BYTES, false); + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode(20, 2, 30, MAX_JOB_BYTES, false); assertTrue(result.getExplanation().contains("node isn't a machine learning node")); assertNull(result.getExecutorNode()); } @@ -570,18 +570,18 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { ) .build(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask("job_id1", "_node_id1", null, tasksBuilder); OpenJobPersistentTasksExecutorTests.addJobTask("job_id2", "_node_id1", null, tasksBuilder); OpenJobPersistentTasksExecutorTests.addJobTask("job_id3", "_node_id2", null, tasksBuilder); OpenJobPersistentTasksExecutorTests.addJobTask("job_id4", "_node_id2", null, tasksBuilder); OpenJobPersistentTasksExecutorTests.addJobTask("job_id5", "_node_id3", null, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); csBuilder.nodes(nodes); Metadata.Builder metadata = Metadata.builder(); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks); csBuilder.metadata(metadata); Job job6 = BaseMlIntegTestCase.createFareQuoteJob("job_id6", JOB_MEMORY_REQUIREMENT).build(new Date()); @@ -596,15 +596,15 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { 0, node -> nodeFilter(node, job6) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); assertEquals("_node_id3", result.getExecutorNode()); - tasksBuilder = PersistentTasksCustomMetadata.builder(tasks); + tasksBuilder = PersistentTasksMetadataSection.builder(tasks); OpenJobPersistentTasksExecutorTests.addJobTask(job6.getId(), "_node_id3", null, tasksBuilder); tasks = tasksBuilder.build(); csBuilder = ClusterState.builder(cs); - csBuilder.metadata(Metadata.builder(cs.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks)); + csBuilder.metadata(Metadata.builder(cs.metadata()).putSection(PersistentTasksMetadataSection.TYPE, tasks)); cs = csBuilder.build(); Job job7 = BaseMlIntegTestCase.createFareQuoteJob("job_id7", JOB_MEMORY_REQUIREMENT).build(new Date()); @@ -621,15 +621,15 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { assertNull("no node selected, because OPENING state", result.getExecutorNode()); assertTrue(result.getExplanation().contains("Node exceeds [2] the maximum number of jobs [2] in opening state")); - tasksBuilder = PersistentTasksCustomMetadata.builder(tasks); + tasksBuilder = 
PersistentTasksMetadataSection.builder(tasks); tasksBuilder.reassignTask( MlTasks.jobTaskId(job6.getId()), - new PersistentTasksCustomMetadata.Assignment("_node_id3", "test assignment") + new PersistentTasksMetadataSection.Assignment("_node_id3", "test assignment") ); tasks = tasksBuilder.build(); csBuilder = ClusterState.builder(cs); - csBuilder.metadata(Metadata.builder(cs.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks)); + csBuilder.metadata(Metadata.builder(cs.metadata()).putSection(PersistentTasksMetadataSection.TYPE, tasks)); cs = csBuilder.build(); jobNodeSelector = new JobNodeSelector( cs, @@ -644,12 +644,12 @@ public void testSelectLeastLoadedMlNode_maxConcurrentOpeningJobs() { assertNull("no node selected, because stale task", result.getExecutorNode()); assertTrue(result.getExplanation().contains("Node exceeds [2] the maximum number of jobs [2] in opening state")); - tasksBuilder = PersistentTasksCustomMetadata.builder(tasks); + tasksBuilder = PersistentTasksMetadataSection.builder(tasks); tasksBuilder.updateTaskState(MlTasks.jobTaskId(job6.getId()), null); tasks = tasksBuilder.build(); csBuilder = ClusterState.builder(cs); - csBuilder.metadata(Metadata.builder(cs.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks)); + csBuilder.metadata(Metadata.builder(cs.metadata()).putSection(PersistentTasksMetadataSection.TYPE, tasks)); cs = csBuilder.build(); jobNodeSelector = new JobNodeSelector( cs, @@ -704,24 +704,24 @@ public void testSelectLeastLoadedMlNode_concurrentOpeningJobsAndStaleFailedJob() ) .build(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask("job_id1", "_node_id1", JobState.fromString("failed"), tasksBuilder); // This will make the assignment stale for job_id1 tasksBuilder.reassignTask( MlTasks.jobTaskId("job_id1"), - new PersistentTasksCustomMetadata.Assignment("_node_id1", "test assignment") + new PersistentTasksMetadataSection.Assignment("_node_id1", "test assignment") ); OpenJobPersistentTasksExecutorTests.addJobTask("job_id2", "_node_id1", null, tasksBuilder); OpenJobPersistentTasksExecutorTests.addJobTask("job_id3", "_node_id2", null, tasksBuilder); OpenJobPersistentTasksExecutorTests.addJobTask("job_id4", "_node_id2", null, tasksBuilder); OpenJobPersistentTasksExecutorTests.addJobTask("job_id5", "_node_id3", null, tasksBuilder); OpenJobPersistentTasksExecutorTests.addJobTask("job_id6", "_node_id3", null, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); csBuilder.nodes(nodes); Metadata.Builder metadata = Metadata.builder(); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks); csBuilder.metadata(metadata); ClusterState cs = csBuilder.build(); @@ -737,15 +737,15 @@ public void testSelectLeastLoadedMlNode_concurrentOpeningJobsAndStaleFailedJob() 0, node -> nodeFilter(node, job7) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); assertEquals("_node_id1", result.getExecutorNode()); - tasksBuilder = 
PersistentTasksCustomMetadata.builder(tasks); + tasksBuilder = PersistentTasksMetadataSection.builder(tasks); OpenJobPersistentTasksExecutorTests.addJobTask("job_id7", "_node_id1", null, tasksBuilder); tasks = tasksBuilder.build(); csBuilder = ClusterState.builder(cs); - csBuilder.metadata(Metadata.builder(cs.metadata()).putCustom(PersistentTasksCustomMetadata.TYPE, tasks)); + csBuilder.metadata(Metadata.builder(cs.metadata()).putSection(PersistentTasksMetadataSection.TYPE, tasks)); cs = csBuilder.build(); Job job8 = BaseMlIntegTestCase.createFareQuoteJob("job_id8", JOB_MEMORY_REQUIREMENT).build(new Date()); jobNodeSelector = new JobNodeSelector( @@ -792,9 +792,9 @@ public void testSelectLeastLoadedMlNode_noCompatibleJobTypeNodes() { ) .build(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask("incompatible_type_job", "_node_id1", null, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); Metadata.Builder metadata = Metadata.builder(); @@ -806,7 +806,7 @@ public void testSelectLeastLoadedMlNode_noCompatibleJobTypeNodes() { when(job.getInitialResultsIndexName()).thenReturn("shared"); cs.nodes(nodes); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks); cs.metadata(metadata); JobNodeSelector jobNodeSelector = new JobNodeSelector( cs.build(), @@ -817,7 +817,7 @@ public void testSelectLeastLoadedMlNode_noCompatibleJobTypeNodes() { 0, node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); assertThat(result.getExplanation(), containsString("node does not support jobs of type [incompatible_type]")); assertNull(result.getExecutorNode()); } @@ -852,9 +852,9 @@ public void testSelectLeastLoadedMlNode_reasonsAreInDeterministicOrder() { ) .build(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask("incompatible_type_job", "_node_id1", null, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); Metadata.Builder metadata = Metadata.builder(); @@ -866,7 +866,7 @@ public void testSelectLeastLoadedMlNode_reasonsAreInDeterministicOrder() { when(job.getInitialResultsIndexName()).thenReturn("shared"); cs.nodes(nodes); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks); cs.metadata(metadata); JobNodeSelector jobNodeSelector = new JobNodeSelector( cs.build(), @@ -877,7 +877,7 @@ public void testSelectLeastLoadedMlNode_reasonsAreInDeterministicOrder() { 0, node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); + PersistentTasksMetadataSection.Assignment result = 
jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); assertThat( result.getExplanation(), equalTo( @@ -932,9 +932,9 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() ) .build(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask("job_with_incompatible_model_snapshot", "_node_id1", null, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); Metadata.Builder metadata = Metadata.builder(); @@ -944,7 +944,7 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() .setModelSnapshotMinVersion(MlConfigVersion.fromString("7.3.0")) .build(new Date()); cs.nodes(nodes); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks); cs.metadata(metadata); JobNodeSelector jobNodeSelector = new JobNodeSelector( cs.build(), @@ -955,7 +955,7 @@ public void testSelectLeastLoadedMlNode_noNodesMatchingModelSnapshotMinVersion() 0, node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); assertThat( result.getExplanation(), containsString("job's model snapshot requires a node with ML config version [7.3.0] or higher") @@ -993,14 +993,14 @@ public void testSelectLeastLoadedMlNode_jobWithRules() { ) .build(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask("job_with_rules", "_node_id1", null, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); Metadata.Builder metadata = Metadata.builder(); cs.nodes(nodes); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks); cs.metadata(metadata); Job job = jobWithRules("job_with_rules"); @@ -1013,7 +1013,7 @@ public void testSelectLeastLoadedMlNode_jobWithRules() { 0, node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); assertNotNull(result.getExecutorNode()); } @@ -1047,14 +1047,14 @@ public void testSelectMlNodeOnlyOutOfCandidates() { ) .build(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask("job_with_rules", "_node_id1", null, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); Metadata.Builder metadata = 
Metadata.builder(); cs.nodes(nodes); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks); cs.metadata(metadata); DiscoveryNode candidate = nodes.getNodes().get(randomBoolean() ? "_node_id1" : "_node_id2"); @@ -1069,7 +1069,7 @@ public void testSelectMlNodeOnlyOutOfCandidates() { 0, node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode(10, 2, 30, MAX_JOB_BYTES, false); assertNotNull(result.getExecutorNode()); assertThat(result.getExecutorNode(), equalTo(candidate.getId())); } @@ -1109,8 +1109,8 @@ public void testConsiderLazyAssignmentWithNoLazyNodes() { 0, node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.considerLazyAssignment( - new PersistentTasksCustomMetadata.Assignment(null, "foo"), + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.considerLazyAssignment( + new PersistentTasksMetadataSection.Assignment(null, "foo"), ByteSizeValue.ofGb(1).getBytes() ); assertEquals("foo", result.getExplanation()); @@ -1152,8 +1152,8 @@ public void testConsiderLazyAssignmentWithLazyNodes() { randomIntBetween(1, 3), node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.considerLazyAssignment( - new PersistentTasksCustomMetadata.Assignment(null, "foo"), + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.considerLazyAssignment( + new PersistentTasksMetadataSection.Assignment(null, "foo"), ByteSizeValue.ofGb(1).getBytes() ); assertEquals(JobNodeSelector.AWAITING_LAZY_ASSIGNMENT.getExplanation(), result.getExplanation()); @@ -1205,8 +1205,8 @@ public void testConsiderLazyAssignmentWithFilledLazyNodesAndVerticalScale() { randomIntBetween(1, 3), node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.considerLazyAssignment( - new PersistentTasksCustomMetadata.Assignment(null, "foo"), + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.considerLazyAssignment( + new PersistentTasksMetadataSection.Assignment(null, "foo"), ByteSizeValue.ofGb(64).getBytes() ); assertEquals(JobNodeSelector.AWAITING_LAZY_ASSIGNMENT.getExplanation(), result.getExplanation()); @@ -1242,7 +1242,7 @@ public void testMaximumPossibleNodeMemoryTooSmall() { randomIntBetween(1, 3), node -> nodeFilter(node, job) ); - PersistentTasksCustomMetadata.Assignment result = jobNodeSelector.selectNode( + PersistentTasksMetadataSection.Assignment result = jobNodeSelector.selectNode( maxRunningJobsPerNode, 2, maxMachineMemoryPercent, @@ -1320,13 +1320,13 @@ public void testPerceivedCapacityAndMaxFreeMemory() { ) .build(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask("one_job", "filled_ml_node_id", null, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); Metadata.Builder metadata = Metadata.builder(); cs.nodes(nodes); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks); 
cs.metadata(metadata); Job job = BaseMlIntegTestCase.createFareQuoteJob("job_id2", JOB_MEMORY_REQUIREMENT).build(new Date()); @@ -1369,7 +1369,7 @@ private ClusterState.Builder fillNodesWithRunningJobs( ) { DiscoveryNodes.Builder nodes = DiscoveryNodes.builder(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); String[] jobIds = new String[numNodes * numRunningJobsPerNode]; for (int i = 0; i < numNodes; i++) { String nodeId = "_node_id" + i; @@ -1387,12 +1387,12 @@ private ClusterState.Builder fillNodesWithRunningJobs( } } } - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); ClusterState.Builder cs = ClusterState.builder(new ClusterName("_name")); Metadata.Builder metadata = Metadata.builder(); cs.nodes(nodes); - metadata.putCustom(PersistentTasksCustomMetadata.TYPE, tasks); + metadata.putSection(PersistentTasksMetadataSection.TYPE, tasks); cs.metadata(metadata); return cs; @@ -1408,7 +1408,7 @@ static void addDataFrameAnalyticsJobTask( String id, String nodeId, DataFrameAnalyticsState state, - PersistentTasksCustomMetadata.Builder builder + PersistentTasksMetadataSection.Builder builder ) { addDataFrameAnalyticsJobTask(id, nodeId, state, builder, false, false); } @@ -1417,7 +1417,7 @@ static void addDataFrameAnalyticsJobTask( String id, String nodeId, DataFrameAnalyticsState state, - PersistentTasksCustomMetadata.Builder builder, + PersistentTasksMetadataSection.Builder builder, boolean isStale, boolean allowLazyStart ) { @@ -1425,7 +1425,7 @@ static void addDataFrameAnalyticsJobTask( MlTasks.dataFrameAnalyticsTaskId(id), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new StartDataFrameAnalyticsAction.TaskParams(id, MlConfigVersion.CURRENT, allowLazyStart), - new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment") + new PersistentTasksMetadataSection.Assignment(nodeId, "test assignment") ); if (state != null) { builder.updateTaskState( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java index c3ad54427f70c..b45ea00cf7442 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/NodeLoadDetectorTests.java @@ -14,7 +14,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.common.transport.TransportAddress; import org.elasticsearch.common.unit.ByteSizeValue; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.action.StartTrainedModelDeploymentAction; import org.elasticsearch.xpack.core.ml.inference.assignment.Priority; @@ -104,19 +104,19 @@ public void testNodeLoadDetection() { ) .build(); - PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder(); + PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder(); OpenJobPersistentTasksExecutorTests.addJobTask("job_id1", "_node_id1", null, tasksBuilder); OpenJobPersistentTasksExecutorTests.addJobTask("job_id2", "_node_id1", null, tasksBuilder); OpenJobPersistentTasksExecutorTests.addJobTask("job_id3", 
"_node_id2", null, tasksBuilder); OpenJobPersistentTasksExecutorTests.addJobTask("job_id4", "_node_id4", JobState.OPENED, tasksBuilder); - PersistentTasksCustomMetadata tasks = tasksBuilder.build(); + PersistentTasksMetadataSection tasks = tasksBuilder.build(); final ClusterState cs = ClusterState.builder(new ClusterName("_name")) .nodes(nodes) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, tasks) - .putCustom( + .putSection(PersistentTasksMetadataSection.TYPE, tasks) + .putSection( TrainedModelAssignmentMetadata.NAME, TrainedModelAssignmentMetadata.Builder.empty() .addNewAssignment( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradePredicateTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradePredicateTests.java index 2e029ed9c58c1..6d4e13444f87f 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradePredicateTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/snapshot/upgrader/SnapshotUpgradePredicateTests.java @@ -7,8 +7,8 @@ package org.elasticsearch.xpack.ml.job.snapshot.upgrader; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.xpack.core.ml.MlTasks; import org.elasticsearch.xpack.core.ml.job.snapshot.upgrade.SnapshotUpgradeState; @@ -27,7 +27,7 @@ public void testWhenWaitForCompletionIsTrue() { MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, new SnapshotUpgradeTaskParams("job", "snapshot"), 1, - new PersistentTasksCustomMetadata.Assignment("test-node", "") + new PersistentTasksMetadataSection.Assignment("test-node", "") ); { SnapshotUpgradePredicate snapshotUpgradePredicate = new SnapshotUpgradePredicate(true, logger); @@ -65,7 +65,7 @@ public void testWhenWaitForCompletionIsFalse() { MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, new SnapshotUpgradeTaskParams("job", "snapshot"), 1, - new PersistentTasksCustomMetadata.Assignment("test-node", "") + new PersistentTasksMetadataSection.Assignment("test-node", "") ); { SnapshotUpgradePredicate snapshotUpgradePredicate = new SnapshotUpgradePredicate(false, logger); diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java index 0440a66bdbcaa..bc21e6762ee4d 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/job/task/OpenJobPersistentTasksExecutorTests.java @@ -33,7 +33,7 @@ import org.elasticsearch.index.shard.ShardId; import org.elasticsearch.indices.TestIndexNameExpressionResolver; import org.elasticsearch.license.XPackLicenseState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; @@ -189,7 +189,7 @@ public void testGetAssignment_GivenLazyJobAndNoGlobalLazyNodes() { 
when(job.allowLazyOpen()).thenReturn(true); OpenJobAction.JobParams params = new OpenJobAction.JobParams("lazy_job"); params.setJob(job); - PersistentTasksCustomMetadata.Assignment assignment = executor.getAssignment( + PersistentTasksMetadataSection.Assignment assignment = executor.getAssignment( params, csBuilder.nodes().getAllNodes(), csBuilder.build() @@ -203,14 +203,14 @@ public void testGetAssignment_GivenResetInProgress() { ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")); Metadata.Builder metadata = Metadata.builder(); MlMetadata mlMetadata = new MlMetadata.Builder().isResetMode(true).build(); - csBuilder.metadata(metadata.putCustom(MlMetadata.TYPE, mlMetadata)); + csBuilder.metadata(metadata.putSection(MlMetadata.TYPE, mlMetadata)); OpenJobPersistentTasksExecutor executor = createExecutor(Settings.EMPTY); Job job = mock(Job.class); OpenJobAction.JobParams params = new OpenJobAction.JobParams("job_during_reset"); params.setJob(job); - PersistentTasksCustomMetadata.Assignment assignment = executor.getAssignment( + PersistentTasksMetadataSection.Assignment assignment = executor.getAssignment( params, csBuilder.nodes().getAllNodes(), csBuilder.build() @@ -220,7 +220,7 @@ public void testGetAssignment_GivenResetInProgress() { assertEquals(MlTasks.RESET_IN_PROGRESS.getExplanation(), assignment.getExplanation()); } - public static void addJobTask(String jobId, String nodeId, JobState jobState, PersistentTasksCustomMetadata.Builder builder) { + public static void addJobTask(String jobId, String nodeId, JobState jobState, PersistentTasksMetadataSection.Builder builder) { addJobTask(jobId, nodeId, jobState, builder, false); } @@ -228,14 +228,14 @@ public static void addJobTask( String jobId, String nodeId, JobState jobState, - PersistentTasksCustomMetadata.Builder builder, + PersistentTasksMetadataSection.Builder builder, boolean isStale ) { builder.addTask( MlTasks.jobTaskId(jobId), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(jobId), - new PersistentTasksCustomMetadata.Assignment(nodeId, "test assignment") + new PersistentTasksMetadataSection.Assignment(nodeId, "test assignment") ); if (jobState != null) { builder.updateTaskState( diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java index 2769069a20314..bf2218ef0ee67 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/process/MlMemoryTrackerTests.java @@ -15,7 +15,7 @@ import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException; import org.elasticsearch.persistent.PersistentTasksClusterService; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.ESTestCase; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.ml.MlConfigVersion; @@ -97,12 +97,12 @@ public void testRefreshAll() { memoryTracker.offMaster(); } - Map> tasks = new HashMap<>(); + Map> tasks = new HashMap<>(); int numAnomalyDetectorJobTasks = randomIntBetween(2, 5); for (int i = 1; i <= numAnomalyDetectorJobTasks; ++i) { String jobId = "job" + i; - PersistentTasksCustomMetadata.PersistentTask task = makeTestAnomalyDetectorTask(jobId); + 
PersistentTasksMetadataSection.PersistentTask task = makeTestAnomalyDetectorTask(jobId); tasks.put(task.getId(), task); } @@ -111,7 +111,7 @@ public void testRefreshAll() { for (int i = numAnomalyDetectorJobTasks; i < numAnomalyDetectorJobTasks + numSnapshotUpgradeTasks; ++i) { String jobId = "job" + i; String snapshotId = Long.toString(randomLongBetween(1000000000L, 9999999999L)); - PersistentTasksCustomMetadata.PersistentTask task = makeTestSnapshotUpgradeTask(jobId, snapshotId); + PersistentTasksMetadataSection.PersistentTask task = makeTestSnapshotUpgradeTask(jobId, snapshotId); tasks.put(task.getId(), task); } @@ -120,11 +120,11 @@ public void testRefreshAll() { for (int i = 1; i <= numDataFrameAnalyticsTasks; ++i) { String id = "analytics" + i; allIds.add(id); - PersistentTasksCustomMetadata.PersistentTask task = makeTestDataFrameAnalyticsTask(id, false); + PersistentTasksMetadataSection.PersistentTask task = makeTestDataFrameAnalyticsTask(id, false); tasks.put(task.getId(), task); } - PersistentTasksCustomMetadata persistentTasks = new PersistentTasksCustomMetadata( + PersistentTasksMetadataSection persistentTasks = new PersistentTasksMetadataSection( numAnomalyDetectorJobTasks + numSnapshotUpgradeTasks + numDataFrameAnalyticsTasks, tasks ); @@ -165,7 +165,7 @@ public void testRefreshWithSkips() { memoryTracker.offMaster(); } - Map> tasks = new HashMap<>(); + Map> tasks = new HashMap<>(); Set toSkip = new HashSet<>(); @@ -175,7 +175,7 @@ public void testRefreshWithSkips() { if (randomBoolean()) { toSkip.add(jobId); } - PersistentTasksCustomMetadata.PersistentTask task = makeTestAnomalyDetectorTask(jobId); + PersistentTasksMetadataSection.PersistentTask task = makeTestAnomalyDetectorTask(jobId); tasks.put(task.getId(), task); } @@ -184,7 +184,7 @@ public void testRefreshWithSkips() { for (int i = numAnomalyDetectorJobTasks; i < numAnomalyDetectorJobTasks + numSnapshotUpgradeTasks; ++i) { String jobId = "job" + i; String snapshotId = Long.toString(randomLongBetween(1000000000L, 9999999999L)); - PersistentTasksCustomMetadata.PersistentTask task = makeTestSnapshotUpgradeTask(jobId, snapshotId); + PersistentTasksMetadataSection.PersistentTask task = makeTestSnapshotUpgradeTask(jobId, snapshotId); tasks.put(task.getId(), task); } @@ -193,11 +193,11 @@ public void testRefreshWithSkips() { for (int i = 1; i <= numDataFrameAnalyticsTasks; ++i) { String id = "analytics" + i; allIds.add(id); - PersistentTasksCustomMetadata.PersistentTask task = makeTestDataFrameAnalyticsTask(id, false); + PersistentTasksMetadataSection.PersistentTask task = makeTestDataFrameAnalyticsTask(id, false); tasks.put(task.getId(), task); } - PersistentTasksCustomMetadata persistentTasks = new PersistentTasksCustomMetadata( + PersistentTasksMetadataSection persistentTasks = new PersistentTasksMetadataSection( numAnomalyDetectorJobTasks + numSnapshotUpgradeTasks + numDataFrameAnalyticsTasks, tasks ); @@ -235,30 +235,30 @@ public void testRefreshWithSkips() { public void testRefreshAllFailure() { - Map> tasks = new HashMap<>(); + Map> tasks = new HashMap<>(); int numAnomalyDetectorJobTasks = randomIntBetween(2, 5); for (int i = 1; i <= numAnomalyDetectorJobTasks; ++i) { String jobId = "job" + i; - PersistentTasksCustomMetadata.PersistentTask task = makeTestAnomalyDetectorTask(jobId); + PersistentTasksMetadataSection.PersistentTask task = makeTestAnomalyDetectorTask(jobId); tasks.put(task.getId(), task); } int numSnapshotUpgradeTasks = randomIntBetween(1, 3); for (int i = numAnomalyDetectorJobTasks; i < 
numAnomalyDetectorJobTasks + numSnapshotUpgradeTasks; ++i) { String jobId = "job" + i; - PersistentTasksCustomMetadata.PersistentTask task = makeTestAnomalyDetectorTask(jobId); + PersistentTasksMetadataSection.PersistentTask task = makeTestAnomalyDetectorTask(jobId); tasks.put(task.getId(), task); } int numDataFrameAnalyticsTasks = randomIntBetween(2, 5); for (int i = 1; i <= numDataFrameAnalyticsTasks; ++i) { String id = "analytics" + i; - PersistentTasksCustomMetadata.PersistentTask task = makeTestDataFrameAnalyticsTask(id, false); + PersistentTasksMetadataSection.PersistentTask task = makeTestDataFrameAnalyticsTask(id, false); tasks.put(task.getId(), task); } - PersistentTasksCustomMetadata persistentTasks = new PersistentTasksCustomMetadata( + PersistentTasksMetadataSection persistentTasks = new PersistentTasksMetadataSection( numAnomalyDetectorJobTasks + numSnapshotUpgradeTasks + numDataFrameAnalyticsTasks, tasks ); @@ -385,39 +385,39 @@ public void testMaxDuration() { assertThat(MlMemoryTracker.max(Duration.ofMinutes(5), Duration.ofMinutes(5)), equalTo(Duration.ofMinutes(5))); } - private PersistentTasksCustomMetadata.PersistentTask makeTestAnomalyDetectorTask(String jobId) { - return new PersistentTasksCustomMetadata.PersistentTask<>( + private PersistentTasksMetadataSection.PersistentTask makeTestAnomalyDetectorTask(String jobId) { + return new PersistentTasksMetadataSection.PersistentTask<>( MlTasks.jobTaskId(jobId), MlTasks.JOB_TASK_NAME, new OpenJobAction.JobParams(jobId), 0, - PersistentTasksCustomMetadata.INITIAL_ASSIGNMENT + PersistentTasksMetadataSection.INITIAL_ASSIGNMENT ); } - private PersistentTasksCustomMetadata.PersistentTask makeTestSnapshotUpgradeTask( + private PersistentTasksMetadataSection.PersistentTask makeTestSnapshotUpgradeTask( String jobId, String snapshotId ) { - return new PersistentTasksCustomMetadata.PersistentTask<>( + return new PersistentTasksMetadataSection.PersistentTask<>( MlTasks.snapshotUpgradeTaskId(jobId, snapshotId), MlTasks.JOB_SNAPSHOT_UPGRADE_TASK_NAME, new SnapshotUpgradeTaskParams(jobId, snapshotId), 0, - PersistentTasksCustomMetadata.INITIAL_ASSIGNMENT + PersistentTasksMetadataSection.INITIAL_ASSIGNMENT ); } - private PersistentTasksCustomMetadata.PersistentTask makeTestDataFrameAnalyticsTask( + private PersistentTasksMetadataSection.PersistentTask makeTestDataFrameAnalyticsTask( String id, boolean allowLazyStart ) { - return new PersistentTasksCustomMetadata.PersistentTask<>( + return new PersistentTasksMetadataSection.PersistentTask<>( MlTasks.dataFrameAnalyticsTaskId(id), MlTasks.DATA_FRAME_ANALYTICS_TASK_NAME, new StartDataFrameAnalyticsAction.TaskParams(id, MlConfigVersion.CURRENT, allowLazyStart), 0, - PersistentTasksCustomMetadata.INITIAL_ASSIGNMENT + PersistentTasksMetadataSection.INITIAL_ASSIGNMENT ); } diff --git a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java index 5aaaa3ff958fd..fdde4f71e8e3c 100644 --- a/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java +++ b/x-pack/plugin/ml/src/test/java/org/elasticsearch/xpack/ml/support/BaseMlIntegTestCase.java @@ -38,7 +38,7 @@ import org.elasticsearch.ingest.common.IngestCommonPlugin; import org.elasticsearch.license.LicenseSettings; import org.elasticsearch.persistent.PersistentTasksClusterService; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import 
org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.plugins.Plugin; import org.elasticsearch.reindex.ReindexPlugin; import org.elasticsearch.script.IngestScript; @@ -515,9 +515,9 @@ protected String awaitJobOpenedAndAssigned(String jobId, String queryNode) throw protected void assertRecentLastTaskStateChangeTime(String taskId, Duration howRecent, String queryNode) { ClusterStateRequest csRequest = new ClusterStateRequest().clear().metadata(true); ClusterStateResponse csResponse = client(queryNode).execute(ClusterStateAction.INSTANCE, csRequest).actionGet(); - PersistentTasksCustomMetadata tasks = csResponse.getState().getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = csResponse.getState().getMetadata().section(PersistentTasksMetadataSection.TYPE); assertNotNull(tasks); - PersistentTasksCustomMetadata.PersistentTask task = tasks.getTask(taskId); + PersistentTasksMetadataSection.PersistentTask task = tasks.getTask(taskId); assertNotNull(task); assertThat(task.getState(), instanceOf(MlTaskState.class)); MlTaskState state = (MlTaskState) task.getState(); diff --git a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistryTests.java b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistryTests.java index 962f76b9e580b..f2239a96f4933 100644 --- a/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistryTests.java +++ b/x-pack/plugin/monitoring/src/test/java/org/elasticsearch/xpack/monitoring/MonitoringTemplateRegistryTests.java @@ -417,7 +417,7 @@ private ClusterState createClusterState( Metadata.builder() .indexTemplates(composableTemplates) .transientSettings(nodeSettings) - .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .putSection(IndexLifecycleMetadata.TYPE, ilmMeta) .build() ) .blocks(new ClusterBlocks.Builder().build()) diff --git a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java index 79227f3dd2cee..e2be93ad3a361 100644 --- a/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java +++ b/x-pack/plugin/old-lucene-versions/src/internalClusterTest/java/org/elasticsearch/xpack/lucene/bwc/ArchiveLicenseIntegTests.java @@ -131,7 +131,7 @@ public void testShardAllocationOnInvalidLicense() throws Exception { // We force this by clearing the existing basic license first updateClusterState( currentState -> ClusterState.builder(currentState) - .metadata(Metadata.builder(currentState.metadata()).removeCustom(LicensesMetadata.TYPE).build()) + .metadata(Metadata.builder(currentState.metadata()).removeSection(LicensesMetadata.TYPE).build()) .build() ); diff --git a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java index 3b361748abf67..2ac95e975bb61 100644 --- a/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java +++ b/x-pack/plugin/profiling/src/main/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistry.java @@ -324,7 +324,7 @@ 
public static boolean isAllResourcesCreated(ClusterState state, Settings setting } } if (isDataStreamsLifecycleOnlyMode(settings) == false) { - IndexLifecycleMetadata ilmMetadata = state.metadata().custom(IndexLifecycleMetadata.TYPE); + IndexLifecycleMetadata ilmMetadata = state.metadata().section(IndexLifecycleMetadata.TYPE); if (ilmMetadata == null) { return false; } diff --git a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistryTests.java b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistryTests.java index 81d6ed15804b6..e3daf4152ae37 100644 --- a/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistryTests.java +++ b/x-pack/plugin/profiling/src/test/java/org/elasticsearch/xpack/profiling/persistence/ProfilingIndexTemplateRegistryTests.java @@ -484,7 +484,7 @@ private ClusterState createClusterState( .componentTemplates(componentTemplates) .indexTemplates(composableTemplates) .transientSettings(nodeSettings) - .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .putSection(IndexLifecycleMetadata.TYPE, ilmMeta) .build() ) .blocks(new ClusterBlocks.Builder().build()) diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupUsageTransportAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupUsageTransportAction.java index 63264a2a9dd02..24ee412567537 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupUsageTransportAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/RollupUsageTransportAction.java @@ -13,7 +13,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.core.Predicates; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -58,7 +58,7 @@ protected void masterOperation( static int findNumberOfRollupJobs(ClusterState state) { int numberOfRollupJobs = 0; - PersistentTasksCustomMetadata persistentTasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection persistentTasks = state.metadata().section(PersistentTasksMetadataSection.TYPE); if (persistentTasks != null) { numberOfRollupJobs = persistentTasks.findTasks(RollupJob.NAME, Predicates.always()).size(); } diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportDeleteRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportDeleteRollupJobAction.java index 0d5a9c86cc3b8..6bc530fff5631 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportDeleteRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportDeleteRollupJobAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import 
org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportResponseHandler; @@ -56,7 +56,7 @@ protected void doExecute(Task task, DeleteRollupJobAction.Request request, Actio final DiscoveryNodes nodes = state.nodes(); if (nodes.isLocalNodeElectedMaster()) { - PersistentTasksCustomMetadata pTasksMeta = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection pTasksMeta = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); if (pTasksMeta != null && pTasksMeta.getTask(request.getId()) != null) { super.doExecute(task, request, listener); } else { diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupJobAction.java index 12cea1c305020..fdba8159f6d24 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportGetRollupJobAction.java @@ -19,7 +19,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.TransportResponseHandler; @@ -93,7 +93,7 @@ protected void doExecute(Task task, GetRollupJobsAction.Request request, ActionL */ static boolean stateHasRollupJobs(GetRollupJobsAction.Request request, ClusterState state) { boolean hasRollupJobs = false; - PersistentTasksCustomMetadata pTasksMeta = state.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection pTasksMeta = state.getMetadata().section(PersistentTasksMetadataSection.TYPE); if (pTasksMeta != null) { // If the request was for _all rollup jobs, we need to look through the list of diff --git a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java index 6618f3199debf..69b0ab0c099ae 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/action/TransportPutRollupJobAction.java @@ -40,7 +40,7 @@ import org.elasticsearch.core.CheckedConsumer; import org.elasticsearch.core.TimeValue; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; @@ -346,7 +346,7 @@ private static void waitForRollupStarted( job.getConfig().getTimeout(), new PersistentTasksService.WaitForPersistentTaskListener() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask task) { listener.onResponse(AcknowledgedResponse.TRUE); } diff --git 
a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java index f4c420db47ac3..c363b55a83ef4 100644 --- a/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java +++ b/x-pack/plugin/rollup/src/main/java/org/elasticsearch/xpack/rollup/job/RollupJobTask.java @@ -23,8 +23,8 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.tasks.TaskManager; @@ -88,7 +88,7 @@ protected AllocatedPersistentTask createTask( String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask persistentTask, + PersistentTasksMetadataSection.PersistentTask persistentTask, Map headers ) { return new RollupJobTask( diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetJobsActionRequestTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetJobsActionRequestTests.java index e319925973174..38afee30dc8cb 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetJobsActionRequestTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/GetJobsActionRequestTests.java @@ -11,7 +11,7 @@ import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.test.AbstractWireSerializingTestCase; import org.elasticsearch.xpack.core.rollup.ConfigTestHelpers; import org.elasticsearch.xpack.core.rollup.action.GetRollupJobsAction; @@ -46,7 +46,7 @@ public void testStateCheckNoPersistentTasks() { ClusterState state = ClusterState.builder(new ClusterName("_name")) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, new PersistentTasksCustomMetadata(0L, Collections.emptyMap())) + .putSection(PersistentTasksMetadataSection.TYPE, new PersistentTasksMetadataSection(0L, Collections.emptyMap())) ) .build(); boolean hasRollupJobs = TransportGetRollupJobAction.stateHasRollupJobs(request, state); @@ -58,7 +58,7 @@ public void testStateCheckAllNoPersistentTasks() { ClusterState state = ClusterState.builder(new ClusterName("_name")) .metadata( Metadata.builder() - .putCustom(PersistentTasksCustomMetadata.TYPE, new PersistentTasksCustomMetadata(0L, Collections.emptyMap())) + .putSection(PersistentTasksMetadataSection.TYPE, new PersistentTasksMetadataSection(0L, Collections.emptyMap())) ) .build(); boolean hasRollupJobs = TransportGetRollupJobAction.stateHasRollupJobs(request, state); @@ -67,12 +67,12 @@ public void testStateCheckAllNoPersistentTasks() { public void testStateCheckNoMatchingPersistentTasks() { GetRollupJobsAction.Request request = new GetRollupJobsAction.Request("foo"); - Map> tasks = Collections.singletonMap( + Map> tasks = Collections.singletonMap( "bar", - new PersistentTasksCustomMetadata.PersistentTask<>("bar", "bar", null, 1, 
null) + new PersistentTasksMetadataSection.PersistentTask<>("bar", "bar", null, 1, null) ); ClusterState state = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, new PersistentTasksCustomMetadata(0L, tasks))) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, new PersistentTasksMetadataSection(0L, tasks))) .build(); boolean hasRollupJobs = TransportGetRollupJobAction.stateHasRollupJobs(request, state); assertFalse(hasRollupJobs); @@ -81,12 +81,12 @@ public void testStateCheckNoMatchingPersistentTasks() { public void testStateCheckMatchingPersistentTasks() { GetRollupJobsAction.Request request = new GetRollupJobsAction.Request("foo"); RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random(), "foo"), Collections.emptyMap()); - Map> tasks = Collections.singletonMap( + Map> tasks = Collections.singletonMap( "foo", - new PersistentTasksCustomMetadata.PersistentTask<>("foo", RollupJob.NAME, job, 1, null) + new PersistentTasksMetadataSection.PersistentTask<>("foo", RollupJob.NAME, job, 1, null) ); ClusterState state = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, new PersistentTasksCustomMetadata(0L, tasks))) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, new PersistentTasksMetadataSection(0L, tasks))) .build(); boolean hasRollupJobs = TransportGetRollupJobAction.stateHasRollupJobs(request, state); assertTrue(hasRollupJobs); @@ -95,12 +95,12 @@ public void testStateCheckMatchingPersistentTasks() { public void testStateCheckAllMatchingPersistentTasks() { GetRollupJobsAction.Request request = new GetRollupJobsAction.Request("_all"); RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random(), "foo"), Collections.emptyMap()); - Map> tasks = Collections.singletonMap( + Map> tasks = Collections.singletonMap( "foo", - new PersistentTasksCustomMetadata.PersistentTask<>("foo", RollupJob.NAME, job, 1, null) + new PersistentTasksMetadataSection.PersistentTask<>("foo", RollupJob.NAME, job, 1, null) ); ClusterState state = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, new PersistentTasksCustomMetadata(0L, tasks))) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, new PersistentTasksMetadataSection(0L, tasks))) .build(); boolean hasRollupJobs = TransportGetRollupJobAction.stateHasRollupJobs(request, state); assertTrue(hasRollupJobs); @@ -110,11 +110,11 @@ public void testStateCheckAllWithSeveralMatchingPersistentTasks() { GetRollupJobsAction.Request request = new GetRollupJobsAction.Request("_all"); RollupJob job = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random(), "foo"), Collections.emptyMap()); RollupJob job2 = new RollupJob(ConfigTestHelpers.randomRollupJobConfig(random(), "bar"), Collections.emptyMap()); - Map> tasks = Maps.newMapWithExpectedSize(2); - tasks.put("foo", new PersistentTasksCustomMetadata.PersistentTask<>("foo", RollupJob.NAME, job, 1, null)); - tasks.put("bar", new PersistentTasksCustomMetadata.PersistentTask<>("bar", RollupJob.NAME, job2, 1, null)); + Map> tasks = Maps.newMapWithExpectedSize(2); + tasks.put("foo", new PersistentTasksMetadataSection.PersistentTask<>("foo", RollupJob.NAME, job, 1, null)); + tasks.put("bar", new PersistentTasksMetadataSection.PersistentTask<>("bar", RollupJob.NAME, job2, 1, null)); 
ClusterState state = ClusterState.builder(new ClusterName("_name")) - .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, new PersistentTasksCustomMetadata(0L, tasks))) + .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, new PersistentTasksMetadataSection(0L, tasks))) .build(); boolean hasRollupJobs = TransportGetRollupJobAction.stateHasRollupJobs(request, state); assertTrue(hasRollupJobs); diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java index ee8b4c79d1893..b7b1b3386b73c 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/action/PutJobStateMachineTests.java @@ -20,7 +20,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.cluster.metadata.MappingMetadata; import org.elasticsearch.common.util.Maps; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval; import org.elasticsearch.test.ESTestCase; @@ -356,12 +356,12 @@ public void testStartTask() { ArgumentCaptor requestCaptor = ArgumentCaptor.forClass(ActionListener.class); doAnswer(invocation -> { - PersistentTasksCustomMetadata.PersistentTask response = new PersistentTasksCustomMetadata.PersistentTask<>( + PersistentTasksMetadataSection.PersistentTask response = new PersistentTasksMetadataSection.PersistentTask<>( job.getConfig().getId(), RollupField.TASK_NAME, job, 123, - mock(PersistentTasksCustomMetadata.Assignment.class) + mock(PersistentTasksMetadataSection.Assignment.class) ); requestCaptor.getValue().onResponse(response); return null; diff --git a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java index 8b63b76cdf248..5a3a27e486d4f 100644 --- a/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java +++ b/x-pack/plugin/rollup/src/test/java/org/elasticsearch/xpack/rollup/job/RollupJobTaskTests.java @@ -22,7 +22,7 @@ import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.node.Node; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.aggregations.InternalAggregations; import org.elasticsearch.search.aggregations.bucket.composite.InternalComposite; import org.elasticsearch.tasks.TaskId; @@ -312,7 +312,7 @@ public void testStartWhenStopping() throws InterruptedException { @Override public void updatePersistentTaskState( PersistentTaskState taskState, - ActionListener> listener + ActionListener> listener ) { assertThat(taskState, instanceOf(RollupJobStatus.class)); int c = counter.get(); @@ -326,12 +326,12 @@ public void updatePersistentTaskState( fail("Should not have updated persistent statuses > 3 times"); } listener.onResponse( - new PersistentTasksCustomMetadata.PersistentTask<>( + new PersistentTasksMetadataSection.PersistentTask<>( "foo", 
RollupField.TASK_NAME, job, 1, - new PersistentTasksCustomMetadata.Assignment("foo", "foo") + new PersistentTasksMetadataSection.Assignment("foo", "foo") ) ); counter.incrementAndGet(); @@ -423,17 +423,17 @@ public void testStartWhenStopped() throws InterruptedException { @Override public void updatePersistentTaskState( PersistentTaskState taskState, - ActionListener> listener + ActionListener> listener ) { assertThat(taskState, instanceOf(RollupJobStatus.class)); assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse( - new PersistentTasksCustomMetadata.PersistentTask<>( + new PersistentTasksMetadataSection.PersistentTask<>( "foo", RollupField.TASK_NAME, job, 1, - new PersistentTasksCustomMetadata.Assignment("foo", "foo") + new PersistentTasksMetadataSection.Assignment("foo", "foo") ) ); } @@ -482,17 +482,17 @@ public void testTriggerUnrelated() throws InterruptedException { @Override public void updatePersistentTaskState( PersistentTaskState taskState, - ActionListener> listener + ActionListener> listener ) { assertThat(taskState, instanceOf(RollupJobStatus.class)); assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse( - new PersistentTasksCustomMetadata.PersistentTask<>( + new PersistentTasksMetadataSection.PersistentTask<>( "foo", RollupField.TASK_NAME, job, 1, - new PersistentTasksCustomMetadata.Assignment("foo", "foo") + new PersistentTasksMetadataSection.Assignment("foo", "foo") ) ); } @@ -544,17 +544,17 @@ public void testTrigger() throws InterruptedException { @Override public void updatePersistentTaskState( PersistentTaskState taskState, - ActionListener> listener + ActionListener> listener ) { assertThat(taskState, instanceOf(RollupJobStatus.class)); assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse( - new PersistentTasksCustomMetadata.PersistentTask<>( + new PersistentTasksMetadataSection.PersistentTask<>( "foo", RollupField.TASK_NAME, job, 1, - new PersistentTasksCustomMetadata.Assignment("foo", "foo") + new PersistentTasksMetadataSection.Assignment("foo", "foo") ) ); } @@ -638,19 +638,19 @@ public void testTriggerWithoutHeaders() throws Exception { @Override public void updatePersistentTaskState( PersistentTaskState taskState, - ActionListener> listener + ActionListener> listener ) { Integer counterValue = counter.getAndIncrement(); if (counterValue == 0) { assertThat(taskState, instanceOf(RollupJobStatus.class)); assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse( - new PersistentTasksCustomMetadata.PersistentTask<>( + new PersistentTasksMetadataSection.PersistentTask<>( "foo", RollupField.TASK_NAME, job, 1, - new PersistentTasksCustomMetadata.Assignment("foo", "foo") + new PersistentTasksMetadataSection.Assignment("foo", "foo") ) ); } else if (counterValue == 1) { @@ -747,19 +747,19 @@ public void testTriggerWithHeaders() throws Exception { @Override public void updatePersistentTaskState( PersistentTaskState taskState, - ActionListener> listener + ActionListener> listener ) { Integer counterValue = counter.getAndIncrement(); if (counterValue == 0) { assertThat(taskState, instanceOf(RollupJobStatus.class)); assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse( - new PersistentTasksCustomMetadata.PersistentTask<>( + new PersistentTasksMetadataSection.PersistentTask<>( "foo", 
RollupField.TASK_NAME, job, 1, - new PersistentTasksCustomMetadata.Assignment("foo", "foo") + new PersistentTasksMetadataSection.Assignment("foo", "foo") ) ); } else if (counterValue == 1) { @@ -858,19 +858,19 @@ public void testSaveStateChangesIDScheme() throws Exception { @Override public void updatePersistentTaskState( PersistentTaskState taskState, - ActionListener> listener + ActionListener> listener ) { Integer counterValue = counter.getAndIncrement(); if (counterValue == 0) { assertThat(taskState, instanceOf(RollupJobStatus.class)); assertThat(((RollupJobStatus) taskState).getIndexerState(), equalTo(IndexerState.STARTED)); listener.onResponse( - new PersistentTasksCustomMetadata.PersistentTask<>( + new PersistentTasksMetadataSection.PersistentTask<>( "foo", RollupField.TASK_NAME, job, 1, - new PersistentTasksCustomMetadata.Assignment("foo", "foo") + new PersistentTasksMetadataSection.Assignment("foo", "foo") ) ); } else if (counterValue == 1) { @@ -971,7 +971,7 @@ public void testStopWhenStopping() throws InterruptedException { @Override public void updatePersistentTaskState( PersistentTaskState taskState, - ActionListener> listener + ActionListener> listener ) { assertThat(taskState, instanceOf(RollupJobStatus.class)); int c = counter.get(); @@ -987,12 +987,12 @@ public void updatePersistentTaskState( fail("Should not have updated persistent statuses > 4 times"); } listener.onResponse( - new PersistentTasksCustomMetadata.PersistentTask<>( + new PersistentTasksMetadataSection.PersistentTask<>( "foo", RollupField.TASK_NAME, job, 1, - new PersistentTasksCustomMetadata.Assignment("foo", "foo") + new PersistentTasksMetadataSection.Assignment("foo", "foo") ) ); counter.incrementAndGet(); diff --git a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java index 73f12f98f8ca0..72c192f077050 100644 --- a/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java +++ b/x-pack/plugin/searchable-snapshots/src/internalClusterTest/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsLicenseIntegTests.java @@ -160,7 +160,7 @@ public void testShardAllocationOnInvalidLicense() throws Exception { // We force this by clearing the existing basic license first updateClusterState( currentState -> ClusterState.builder(currentState) - .metadata(Metadata.builder(currentState.metadata()).removeCustom(LicensesMetadata.TYPE).build()) + .metadata(Metadata.builder(currentState.metadata()).removeSection(LicensesMetadata.TYPE).build()) .build() ); diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java index 18ebe65d87986..02fc3fb46635a 100644 --- a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java @@ -734,7 +734,7 @@ private static final class RepositoryUuidWatcher implements ClusterStateListener @Override public void clusterChanged(ClusterChangedEvent event) { - final 
RepositoriesMetadata repositoriesMetadata = event.state().metadata().custom(RepositoriesMetadata.TYPE); + final RepositoriesMetadata repositoriesMetadata = event.state().metadata().section(RepositoriesMetadata.TYPE); if (repositoriesMetadata == null) { knownUuids.clear(); return; diff --git a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java index 6777c38b809e0..dd1509719be0b 100644 --- a/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java +++ b/x-pack/plugin/security/src/test/java/org/elasticsearch/xpack/security/SecurityImplicitBehaviorBootstrapCheckTests.java @@ -141,7 +141,7 @@ public void testUpgradeFrom8xWithExplicitSecuritySettings() throws Exception { private Metadata createLicensesMetadata(TrialLicenseVersion era, String licenseMode) throws Exception { License license = TestUtils.generateSignedLicense(licenseMode, TimeValue.timeValueHours(2)); - return Metadata.builder().putCustom(LicensesMetadata.TYPE, new LicensesMetadata(license, era)).build(); + return Metadata.builder().putSection(LicensesMetadata.TYPE, new LicensesMetadata(license, era)).build(); } private static BuildVersion toBuildVersion(Version version) { diff --git a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java index 46f568d286f9e..f89424e763094 100644 --- a/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java +++ b/x-pack/plugin/shutdown/src/internalClusterTest/java/org/elasticsearch/xpack/shutdown/NodeShutdownTasksIT.java @@ -28,8 +28,8 @@ import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskParams; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.plugins.PersistentTaskPlugin; import org.elasticsearch.plugins.Plugin; @@ -166,7 +166,7 @@ protected TaskExecutor(Client client, ClusterService clusterService, ThreadPool } @Override - public PersistentTasksCustomMetadata.Assignment getAssignment( + public PersistentTasksMetadataSection.Assignment getAssignment( TestTaskParams params, Collection candidateNodes, ClusterState clusterState diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/NodeSeenService.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/NodeSeenService.java index 83e72d4146640..3e06d50cab1e7 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/NodeSeenService.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/NodeSeenService.java @@ -64,7 +64,7 @@ public void clusterChanged(ClusterChangedEvent event) { return; } - NodesShutdownMetadata eventShutdownMetadata = event.state().metadata().custom(NodesShutdownMetadata.TYPE); + NodesShutdownMetadata eventShutdownMetadata = event.state().metadata().section(NodesShutdownMetadata.TYPE); if (eventShutdownMetadata == null) { // 
Since there's no shutdown metadata at all, we know no shutdowns have ever been registered and we can bail. @@ -119,7 +119,7 @@ public ClusterState execute(BatchExecutionContext batc return ClusterState.builder(initialState) .metadata( Metadata.builder(initialState.metadata()) - .putCustom(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(shutdownMetadata)) + .putSection(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(shutdownMetadata)) .build() ) .build(); diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java index d66f2cbddd182..0fe277f844d87 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeAction.java @@ -98,7 +98,7 @@ public ClusterState execute(BatchExecutionContext batchE return ClusterState.builder(batchExecutionContext.initialState()) .metadata( Metadata.builder(batchExecutionContext.initialState().metadata()) - .putCustom(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(shutdownMetadata)) + .putSection(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(shutdownMetadata)) ) .build(); } @@ -132,7 +132,7 @@ public TransportDeleteShutdownNodeAction( protected void masterOperation(Task task, Request request, ClusterState state, ActionListener listener) throws Exception { { // This block solely to ensure this NodesShutdownMetadata isn't accidentally used in the cluster state update task below - NodesShutdownMetadata nodesShutdownMetadata = state.metadata().custom(NodesShutdownMetadata.TYPE); + NodesShutdownMetadata nodesShutdownMetadata = state.metadata().section(NodesShutdownMetadata.TYPE); if (nodesShutdownMetadata == null || nodesShutdownMetadata.get(request.getNodeId()) == null) { throw new ResourceNotFoundException("node [" + request.getNodeId() + "] is not currently shutting down"); } diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java index 33a285128e08c..d740b8bd2ab97 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusAction.java @@ -108,7 +108,7 @@ protected void masterOperation( ActionListener listener ) { CancellableTask cancellableTask = (CancellableTask) task; - NodesShutdownMetadata nodesShutdownMetadata = state.metadata().custom(NodesShutdownMetadata.TYPE); + NodesShutdownMetadata nodesShutdownMetadata = state.metadata().section(NodesShutdownMetadata.TYPE); GetShutdownStatusAction.Response response; if (nodesShutdownMetadata == null) { diff --git a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java index 4e1b8c3cf3b9a..0a9746a0a4fcd 100644 --- a/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java +++ b/x-pack/plugin/shutdown/src/main/java/org/elasticsearch/xpack/shutdown/TransportPutShutdownNodeAction.java @@ -124,7 +124,7 @@ public ClusterState 
execute(BatchExecutionContext batchExec } final var updatedState = initialState.copyAndUpdateMetadata( - b -> b.putCustom(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(shutdownMetadata)) + b -> b.putSection(NodesShutdownMetadata.TYPE, new NodesShutdownMetadata(shutdownMetadata)) ); if (needsReroute == false) { diff --git a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java index 88f397ff9ad20..96b5f0ab68385 100644 --- a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java +++ b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportDeleteShutdownNodeActionTests.java @@ -77,7 +77,7 @@ public void init() { public void testNoop() throws Exception { var singleNodeMetadata = mock(SingleNodeShutdownMetadata.class); var nodesShutdownMetadata = new NodesShutdownMetadata(Map.of("node1", singleNodeMetadata)); - var metadata = Metadata.builder().putCustom(TYPE, nodesShutdownMetadata).build(); + var metadata = Metadata.builder().putSection(TYPE, nodesShutdownMetadata).build(); var clusterStateWithShutdown = ClusterState.builder(ClusterState.EMPTY_STATE).metadata(metadata).build(); var request = new DeleteShutdownNodeAction.Request(TEST_REQUEST_TIMEOUT, TEST_REQUEST_TIMEOUT, "node1"); diff --git a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java index 9a1dda99674c9..f7918ccc5ca8e 100644 --- a/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java +++ b/x-pack/plugin/shutdown/src/test/java/org/elasticsearch/xpack/shutdown/TransportGetShutdownStatusActionTests.java @@ -593,7 +593,7 @@ public void testNodeNotInCluster() { .metadata( Metadata.builder() .indices(Map.of()) - .putCustom( + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Map.of( @@ -856,7 +856,7 @@ private ClusterState createTestClusterState( .metadata( Metadata.builder() .indices(indicesTable) - .putCustom( + .putSection( NodesShutdownMetadata.TYPE, new NodesShutdownMetadata( Map.of( @@ -880,7 +880,7 @@ private ClusterState setIlmOperationMode(ClusterState state, OperationMode opera return ClusterState.builder(state) .metadata( Metadata.builder(state.metadata()) - .putCustom(LifecycleOperationMetadata.TYPE, new LifecycleOperationMetadata(operationMode, currentSLMMode(state))) + .putSection(LifecycleOperationMetadata.TYPE, new LifecycleOperationMetadata(operationMode, currentSLMMode(state))) ) .build(); } diff --git a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java index d8b1f36c25e54..9abc0b78bfe15 100644 --- a/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java +++ b/x-pack/plugin/slm/src/internalClusterTest/java/org/elasticsearch/xpack/slm/SLMStatDisruptionIT.java @@ -580,13 +580,13 @@ private void assertMetadata(String policyName, long taken, long failure, long in private SnapshotLifecycleMetadata getSnapshotLifecycleMetadata() { final ClusterStateResponse clusterStateResponse = client().admin().cluster().state(new 
ClusterStateRequest()).actionGet(); ClusterState state = clusterStateResponse.getState(); - return state.metadata().custom(SnapshotLifecycleMetadata.TYPE); + return state.metadata().section(SnapshotLifecycleMetadata.TYPE); } private RegisteredPolicySnapshots getRegisteredSnapshots() { final ClusterStateResponse clusterStateResponse = client().admin().cluster().state(new ClusterStateRequest()).actionGet(); ClusterState state = clusterStateResponse.getState(); - return state.metadata().custom(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY); + return state.metadata().section(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY); } private SnapshotInfo getSnapshotInfo(String repository, String snapshot) { diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SLMUsageTransportAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SLMUsageTransportAction.java index 76bcb95b7f740..dc4151f664a24 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SLMUsageTransportAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SLMUsageTransportAction.java @@ -50,7 +50,7 @@ protected void masterOperation( ClusterState state, ActionListener listener ) { - final SnapshotLifecycleMetadata slmMeta = state.metadata().custom(SnapshotLifecycleMetadata.TYPE); + final SnapshotLifecycleMetadata slmMeta = state.metadata().section(SnapshotLifecycleMetadata.TYPE); final SLMFeatureSetUsage usage = new SLMFeatureSetUsage(slmMeta == null ? null : slmMeta.getStats()); listener.onResponse(new XPackUsageFeatureResponse(usage)); } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorService.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorService.java index 6076214833704..c69deaed567dc 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorService.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorService.java @@ -102,7 +102,7 @@ public String name() { @Override public HealthIndicatorResult calculate(boolean verbose, int maxAffectedResourcesCount, HealthInfo healthInfo) { final ClusterState currentState = clusterService.state(); - var slmMetadata = currentState.metadata().custom(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY); + var slmMetadata = currentState.metadata().section(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY); final OperationMode currentMode = currentSLMMode(currentState); if (slmMetadata.getSnapshotConfigurations().isEmpty()) { return createIndicator( diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java index 0d79ecf31670c..7df89ff621ff9 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycle.java @@ -13,7 +13,7 @@ import org.elasticsearch.client.internal.Client; import org.elasticsearch.client.internal.OriginSettingClient; import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.metadata.MetadataSection; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.io.stream.NamedWriteableRegistry; 
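[Editor's note, not part of the patch] Every hunk in this change applies the same mechanical rename, so a minimal sketch of the new accessors may help while scanning the remaining files. It is a sketch under assumptions, not a statement of the final API: it assumes Metadata#section, Metadata.Builder#putSection and #removeSection keep the signatures of the former custom/putCustom/removeCustom accessors, and that PersistentTasksMetadataSection is a drop-in rename of PersistentTasksCustomMetadata; only identifiers that already appear in this diff are used.

    // Illustrative sketch only; mirrors the replacements made throughout this patch.
    import org.elasticsearch.cluster.ClusterName;
    import org.elasticsearch.cluster.ClusterState;
    import org.elasticsearch.cluster.metadata.Metadata;
    import org.elasticsearch.persistent.PersistentTasksMetadataSection;

    class MetadataSectionRenameSketch {
        // Old: state.metadata().custom(PersistentTasksCustomMetadata.TYPE); may return null when the section is absent.
        static PersistentTasksMetadataSection readTasks(ClusterState state) {
            return state.metadata().section(PersistentTasksMetadataSection.TYPE);
        }

        // Old: Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasks)
        static ClusterState writeTasks(PersistentTasksMetadataSection tasks) {
            return ClusterState.builder(new ClusterName("example"))
                .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasks).build())
                .build();
        }
    }

Call sites that cannot tolerate a null section pass a default instead, as in section(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY) elsewhere in this diff; the two-argument form is assumed to behave like the old two-argument custom(type, default).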
@@ -167,12 +167,12 @@ private static List xContentEntries() { return Arrays.asList( // Custom Metadata new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(SnapshotLifecycleMetadata.TYPE), parser -> SnapshotLifecycleMetadata.PARSER.parse(parser, null) ), new NamedXContentRegistry.Entry( - Metadata.Custom.class, + MetadataSection.class, new ParseField(RegisteredPolicySnapshots.TYPE), RegisteredPolicySnapshots::parse ) diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java index 6d77926149334..d974701599bdf 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleService.java @@ -138,14 +138,14 @@ static boolean slmStopping(ClusterState state) { * Schedule all non-scheduled snapshot jobs contained in the cluster state */ public void scheduleSnapshotJobs(final ClusterState state) { - SnapshotLifecycleMetadata snapMeta = state.metadata().custom(SnapshotLifecycleMetadata.TYPE); + SnapshotLifecycleMetadata snapMeta = state.metadata().section(SnapshotLifecycleMetadata.TYPE); if (snapMeta != null) { snapMeta.getSnapshotConfigurations().values().forEach(this::maybeScheduleSnapshot); } } public void cleanupDeletedPolicies(final ClusterState state) { - SnapshotLifecycleMetadata snapMeta = state.metadata().custom(SnapshotLifecycleMetadata.TYPE); + SnapshotLifecycleMetadata snapMeta = state.metadata().section(SnapshotLifecycleMetadata.TYPE); if (snapMeta != null) { // Retrieve all of the expected policy job ids from the policies in the metadata final Set policyJobIds = snapMeta.getSnapshotConfigurations() diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java index adf011e0ade37..825faa2c8ae03 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTask.java @@ -199,7 +199,7 @@ private static void submitUnbatchedTask( * For the given job id, return an optional policy metadata object, if one exists */ static Optional getSnapPolicyMetadata(final String jobId, final ClusterState state) { - return Optional.ofNullable((SnapshotLifecycleMetadata) state.metadata().custom(SnapshotLifecycleMetadata.TYPE)) + return Optional.ofNullable((SnapshotLifecycleMetadata) state.metadata().section(SnapshotLifecycleMetadata.TYPE)) .map(SnapshotLifecycleMetadata::getSnapshotConfigurations) .flatMap( configMap -> configMap.values() @@ -272,9 +272,9 @@ static WriteJobStatus failure(String policyId, SnapshotId snapshotId, long times @Override public ClusterState execute(ClusterState currentState) throws Exception { SnapshotLifecycleMetadata snapMeta = currentState.metadata() - .custom(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY); + .section(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY); RegisteredPolicySnapshots registeredSnapshots = currentState.metadata() - .custom(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY); + .section(RegisteredPolicySnapshots.TYPE, RegisteredPolicySnapshots.EMPTY); Map snapLifecycles = new HashMap<>(snapMeta.getSnapshotConfigurations()); SnapshotLifecyclePolicyMetadata policyMetadata 
= snapLifecycles.get(policyName); @@ -348,8 +348,8 @@ public ClusterState execute(ClusterState currentState) throws Exception { newStats ); Metadata newMeta = Metadata.builder(currentState.metadata()) - .putCustom(SnapshotLifecycleMetadata.TYPE, lifecycleMetadata) - .putCustom(RegisteredPolicySnapshots.TYPE, new RegisteredPolicySnapshots(newRegistered)) + .putSection(SnapshotLifecycleMetadata.TYPE, lifecycleMetadata) + .putSection(RegisteredPolicySnapshots.TYPE, new RegisteredPolicySnapshots(newRegistered)) .build(); return ClusterState.builder(currentState).metadata(newMeta).build(); } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java index 0cf1373e92beb..804a9deaac6a0 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/SnapshotRetentionTask.java @@ -169,7 +169,7 @@ public void onFailure(Exception e) { } static Map getAllPoliciesWithRetentionEnabled(final ClusterState state) { - final SnapshotLifecycleMetadata snapMeta = state.metadata().custom(SnapshotLifecycleMetadata.TYPE); + final SnapshotLifecycleMetadata snapMeta = state.metadata().section(SnapshotLifecycleMetadata.TYPE); if (snapMeta == null) { return Collections.emptyMap(); } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/UpdateSnapshotLifecycleStatsTask.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/UpdateSnapshotLifecycleStatsTask.java index 11478e929c76b..74820669cdc5b 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/UpdateSnapshotLifecycleStatsTask.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/UpdateSnapshotLifecycleStatsTask.java @@ -37,7 +37,7 @@ public class UpdateSnapshotLifecycleStatsTask extends ClusterStateUpdateTask { @Override public ClusterState execute(ClusterState currentState) { final Metadata currentMeta = currentState.metadata(); - final SnapshotLifecycleMetadata currentSlmMeta = currentMeta.custom(SnapshotLifecycleMetadata.TYPE); + final SnapshotLifecycleMetadata currentSlmMeta = currentMeta.section(SnapshotLifecycleMetadata.TYPE); if (currentSlmMeta == null) { return currentState; @@ -51,7 +51,7 @@ public ClusterState execute(ClusterState currentState) { ); return ClusterState.builder(currentState) - .metadata(Metadata.builder(currentMeta).putCustom(SnapshotLifecycleMetadata.TYPE, newSlmMeta)) + .metadata(Metadata.builder(currentMeta).putSection(SnapshotLifecycleMetadata.TYPE, newSlmMeta)) .build(); } diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportDeleteSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportDeleteSnapshotLifecycleAction.java index 7fc985b1b2bb8..cb2ab2f362ae9 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportDeleteSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportDeleteSnapshotLifecycleAction.java @@ -85,7 +85,7 @@ public static class DeleteSnapshotPolicyTask extends AckedClusterStateUpdateTask @Override public ClusterState execute(ClusterState currentState) { - SnapshotLifecycleMetadata snapMeta = currentState.metadata().custom(SnapshotLifecycleMetadata.TYPE); + SnapshotLifecycleMetadata snapMeta = 
currentState.metadata().section(SnapshotLifecycleMetadata.TYPE); if (snapMeta == null) { throw new ResourceNotFoundException("snapshot lifecycle policy not found: {}", request.getLifecycleId()); } @@ -108,7 +108,7 @@ public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState) .metadata( Metadata.builder(metadata) - .putCustom( + .putSection( SnapshotLifecycleMetadata.TYPE, new SnapshotLifecycleMetadata( newConfigs, diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportExecuteSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportExecuteSnapshotLifecycleAction.java index 1933430499474..bddba38d786be 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportExecuteSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportExecuteSnapshotLifecycleAction.java @@ -71,7 +71,7 @@ protected void masterOperation( ) { try { final String policyId = request.getLifecycleId(); - SnapshotLifecycleMetadata snapMeta = state.metadata().custom(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY); + SnapshotLifecycleMetadata snapMeta = state.metadata().section(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY); SnapshotLifecyclePolicyMetadata policyMetadata = snapMeta.getSnapshotConfigurations().get(policyId); if (policyMetadata == null) { listener.onFailure(new IllegalArgumentException("no such snapshot lifecycle policy [" + policyId + "]")); diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java index 00dc44318846b..aebc91ce7a521 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleAction.java @@ -68,7 +68,7 @@ protected void masterOperation( final ClusterState state, final ActionListener listener ) { - SnapshotLifecycleMetadata snapMeta = state.metadata().custom(SnapshotLifecycleMetadata.TYPE); + SnapshotLifecycleMetadata snapMeta = state.metadata().section(SnapshotLifecycleMetadata.TYPE); if (snapMeta == null) { if (request.getLifecycleIds().length == 0) { listener.onResponse(new GetSnapshotLifecycleAction.Response(Collections.emptyList())); diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleStatsAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleStatsAction.java index 689621f514c0d..2ff9b72aef00e 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleStatsAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportGetSnapshotLifecycleStatsAction.java @@ -57,7 +57,7 @@ protected void masterOperation( ClusterState state, ActionListener listener ) { - SnapshotLifecycleMetadata slmMeta = state.metadata().custom(SnapshotLifecycleMetadata.TYPE); + SnapshotLifecycleMetadata slmMeta = state.metadata().section(SnapshotLifecycleMetadata.TYPE); if (slmMeta == null) { listener.onResponse(new GetSnapshotLifecycleStatsAction.Response(new SnapshotLifecycleStats())); } else { diff --git 
a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportPutSnapshotLifecycleAction.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportPutSnapshotLifecycleAction.java index 5bffe2c36596b..1e84fef09a2ca 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportPutSnapshotLifecycleAction.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/action/TransportPutSnapshotLifecycleAction.java @@ -126,7 +126,7 @@ public static class UpdateSnapshotPolicyTask extends AckedClusterStateUpdateTask @Override public ClusterState execute(ClusterState currentState) { SnapshotLifecycleMetadata snapMeta = currentState.metadata() - .custom(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY); + .section(SnapshotLifecycleMetadata.TYPE, SnapshotLifecycleMetadata.EMPTY); var currentMode = LifecycleOperationMetadata.currentSLMMode(currentState); final SnapshotLifecyclePolicyMetadata existingPolicyMetadata = snapMeta.getSnapshotConfigurations() .get(request.getLifecycleId()); @@ -155,7 +155,7 @@ public ClusterState execute(ClusterState currentState) { return ClusterState.builder(currentState) .metadata( Metadata.builder(currentState.metadata()) - .putCustom( + .putSection( SnapshotLifecycleMetadata.TYPE, new SnapshotLifecycleMetadata(snapLifecycles, currentMode, snapMeta.getStats()) ) diff --git a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java index f40ea5a56463a..5f03365dbe9cb 100644 --- a/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java +++ b/x-pack/plugin/slm/src/main/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistry.java @@ -115,7 +115,7 @@ public boolean validate(ClusterState state) { .allMatch(name -> state.metadata().templatesV2().containsKey(name)); Optional> maybePolicies = Optional.ofNullable( - state.metadata().custom(IndexLifecycleMetadata.TYPE) + state.metadata().section(IndexLifecycleMetadata.TYPE) ).map(IndexLifecycleMetadata::getPolicies); Set policyNames = getLifecyclePolicies().stream().map(LifecyclePolicy::getName).collect(Collectors.toSet()); diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorServiceTests.java index 9b0d20308cf76..43fcd04b5f185 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SlmHealthIndicatorServiceTests.java @@ -398,7 +398,7 @@ public void testMappedFieldsForTelemetry() { private static ClusterState createClusterStateWith(SnapshotLifecycleMetadata metadata) { var builder = new ClusterState.Builder(new ClusterName("test-cluster")); if (metadata != null) { - builder.metadata(new Metadata.Builder().putCustom(SnapshotLifecycleMetadata.TYPE, metadata)); + builder.metadata(new Metadata.Builder().putSection(SnapshotLifecycleMetadata.TYPE, metadata)); } return builder.build(); } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java index 5b59ac9efc0ab..84b9bb209c4ad 100644 --- 
a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleServiceTests.java @@ -88,7 +88,7 @@ public void testRepositoryExistenceForExistingRepo() { RepositoryMetadata repo = new RepositoryMetadata("repo", "fs", Settings.EMPTY); RepositoriesMetadata repoMeta = new RepositoriesMetadata(Collections.singletonList(repo)); ClusterState stateWithRepo = ClusterState.builder(state) - .metadata(Metadata.builder().putCustom(RepositoriesMetadata.TYPE, repoMeta)) + .metadata(Metadata.builder().putSection(RepositoriesMetadata.TYPE, repoMeta)) .build(); SnapshotLifecycleService.validateRepositoryExists("repo", stateWithRepo); @@ -505,7 +505,7 @@ public ClusterState createState(SnapshotLifecycleMetadata snapMeta) { } public ClusterState createState(SnapshotLifecycleMetadata snapMeta, boolean localNodeMaster) { - Metadata metadata = Metadata.builder().putCustom(SnapshotLifecycleMetadata.TYPE, snapMeta).build(); + Metadata metadata = Metadata.builder().putSection(SnapshotLifecycleMetadata.TYPE, snapMeta).build(); final DiscoveryNodes.Builder discoveryNodesBuilder = DiscoveryNodes.builder() .add(DiscoveryNodeUtils.create("local", new TransportAddress(TransportAddress.META_ADDRESS, 9300))) .add(DiscoveryNodeUtils.create("remote", new TransportAddress(TransportAddress.META_ADDRESS, 9301))) diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java index 0b273c307cf47..9dc6b3b79cb89 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecycleTaskTests.java @@ -84,7 +84,7 @@ public void testGetSnapMetadata() { ); final ClusterState state = ClusterState.builder(new ClusterName("test")) - .metadata(Metadata.builder().putCustom(SnapshotLifecycleMetadata.TYPE, meta).build()) + .metadata(Metadata.builder().putSection(SnapshotLifecycleMetadata.TYPE, meta).build()) .build(); final Optional o = SnapshotLifecycleTask.getSnapPolicyMetadata( @@ -108,7 +108,7 @@ public void testSkipCreatingSnapshotWhenJobDoesNotMatch() { ); final ClusterState state = ClusterState.builder(new ClusterName("test")) - .metadata(Metadata.builder().putCustom(SnapshotLifecycleMetadata.TYPE, meta).build()) + .metadata(Metadata.builder().putSection(SnapshotLifecycleMetadata.TYPE, meta).build()) .build(); final ThreadPool threadPool = new TestThreadPool("test"); @@ -147,7 +147,7 @@ public void testCreateSnapshotOnTrigger() throws Exception { ); final ClusterState state = ClusterState.builder(new ClusterName("test")) - .metadata(Metadata.builder().putCustom(SnapshotLifecycleMetadata.TYPE, meta).build()) + .metadata(Metadata.builder().putSection(SnapshotLifecycleMetadata.TYPE, meta).build()) .nodes( DiscoveryNodes.builder() .add(DiscoveryNodeUtils.builder("nodeId").name("nodeId").build()) @@ -253,7 +253,7 @@ public void testPartialFailureSnapshot() throws Exception { ); final ClusterState state = ClusterState.builder(new ClusterName("test")) - .metadata(Metadata.builder().putCustom(SnapshotLifecycleMetadata.TYPE, meta).build()) + .metadata(Metadata.builder().putSection(SnapshotLifecycleMetadata.TYPE, meta).build()) .nodes( DiscoveryNodes.builder() .add(DiscoveryNodeUtils.builder("nodeId").name("nodeId").build()) @@ -357,7 +357,7 @@ public void 
testDeletedPoliciesHaveRegisteredRemoved() throws Exception { ClusterState clusterState = buildClusterState(definedSlmPolicies, registeredSnapshots, inProgress); ClusterState newClusterState = writeJobStatus.execute(clusterState); - RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().custom(RegisteredPolicySnapshots.TYPE); + RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().section(RegisteredPolicySnapshots.TYPE); assertEquals(List.of(), newRegisteredPolicySnapshots.getSnapshots()); } @@ -380,7 +380,7 @@ public void testOtherDefinedPoliciesUneffected() throws Exception { ClusterState clusterState = buildClusterState(definedSlmPolicies, registeredSnapshots, inProgress); ClusterState newClusterState = writeJobStatus.execute(clusterState); - RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().custom(RegisteredPolicySnapshots.TYPE); + RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().section(RegisteredPolicySnapshots.TYPE); assertEquals(List.of(otherSnapRunning, otherSnapNotRunning), newRegisteredPolicySnapshots.getSnapshotsByPolicy(otherPolicy)); assertEquals(List.of(), newRegisteredPolicySnapshots.getSnapshotsByPolicy(policyId)); @@ -403,7 +403,7 @@ public void testInitiatingSnapRemovedButStillRunningRemains() throws Exception { ClusterState clusterState = buildClusterState(definedSlmPolicies, registeredSnapshots, inProgress); ClusterState newClusterState = writeJobStatus.execute(clusterState); - RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().custom(RegisteredPolicySnapshots.TYPE); + RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().section(RegisteredPolicySnapshots.TYPE); assertEquals(List.of(stillRunning), newRegisteredPolicySnapshots.getSnapshotsByPolicy(policyId)); } @@ -425,7 +425,7 @@ public void testInferFailureInitiatedBySuccess() throws Exception { ClusterState newClusterState = writeJobTask.execute(clusterState); // previous failure is now recorded in stats and metadata - SnapshotLifecycleMetadata newSlmMetadata = newClusterState.metadata().custom(SnapshotLifecycleMetadata.TYPE); + SnapshotLifecycleMetadata newSlmMetadata = newClusterState.metadata().section(SnapshotLifecycleMetadata.TYPE); SnapshotLifecycleStats newStats = newSlmMetadata.getStats(); SnapshotLifecycleStats.SnapshotPolicyStats snapshotPolicyStats = newStats.getMetrics().get(policyId); assertEquals(1, snapshotPolicyStats.getSnapshotFailedCount()); @@ -437,7 +437,7 @@ public void testInferFailureInitiatedBySuccess() throws Exception { assertEquals(0, newSlmPolicyMetadata.getInvocationsSinceLastSuccess()); // failed snapshot no longer in registeredSnapshot set - RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().custom(RegisteredPolicySnapshots.TYPE); + RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().section(RegisteredPolicySnapshots.TYPE); List newRegisteredSnapIds = newRegisteredPolicySnapshots.getSnapshotsByPolicy(policyId); assertEquals(List.of(stillRunning), newRegisteredSnapIds); } @@ -458,7 +458,7 @@ public void testInferFailureInitiatedByFailure() throws Exception { ClusterState newClusterState = writeJobTask.execute(clusterState); // previous failure is now recorded in stats and metadata - SnapshotLifecycleMetadata newSlmMetadata = newClusterState.metadata().custom(SnapshotLifecycleMetadata.TYPE); + SnapshotLifecycleMetadata newSlmMetadata = 
newClusterState.metadata().section(SnapshotLifecycleMetadata.TYPE); SnapshotLifecycleStats newStats = newSlmMetadata.getStats(); SnapshotLifecycleStats.SnapshotPolicyStats snapshotPolicyStats = newStats.getMetrics().get(policyId); assertEquals(2, snapshotPolicyStats.getSnapshotFailedCount()); @@ -470,7 +470,7 @@ public void testInferFailureInitiatedByFailure() throws Exception { assertEquals(2, newSlmPolicyMetadata.getInvocationsSinceLastSuccess()); // failed snapshot no longer in registeredSnapshot set - RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().custom(RegisteredPolicySnapshots.TYPE); + RegisteredPolicySnapshots newRegisteredPolicySnapshots = newClusterState.metadata().section(RegisteredPolicySnapshots.TYPE); List newRegisteredSnapIds = newRegisteredPolicySnapshots.getSnapshotsByPolicy(policyId); assertEquals(List.of(stillRunning), newRegisteredSnapIds); } @@ -529,8 +529,8 @@ private static ClusterState buildClusterState( .putCustom(SnapshotsInProgress.TYPE, SnapshotsInProgress.EMPTY.withUpdatedEntriesForRepo(repo, inProgressEntries)) .metadata( Metadata.builder() - .putCustom(SnapshotLifecycleMetadata.TYPE, makeSnapMeta(slmPolicies)) - .putCustom(RegisteredPolicySnapshots.TYPE, new RegisteredPolicySnapshots(policySnapshots)) + .putSection(SnapshotLifecycleMetadata.TYPE, makeSnapMeta(slmPolicies)) + .putSection(RegisteredPolicySnapshots.TYPE, new RegisteredPolicySnapshots(policySnapshots)) ) .build(); diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java index 184d207ee173d..e3ee9fe8ca87c 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/SnapshotRetentionTaskTests.java @@ -99,7 +99,7 @@ public void testGetAllPoliciesWithRetentionEnabled() { // Test with empty SLM metadata Metadata metadata = Metadata.builder() - .putCustom( + .putSection( SnapshotLifecycleMetadata.TYPE, new SnapshotLifecycleMetadata(Collections.emptyMap(), OperationMode.RUNNING, new SnapshotLifecycleStats()) ) @@ -505,7 +505,10 @@ public ClusterState createState(OperationMode mode, SnapshotLifecyclePolicy... 
p .collect(Collectors.toMap(pm -> pm.getPolicy().getId(), pm -> pm)); Metadata metadata = Metadata.builder() - .putCustom(SnapshotLifecycleMetadata.TYPE, new SnapshotLifecycleMetadata(policyMetadataMap, mode, new SnapshotLifecycleStats())) + .putSection( + SnapshotLifecycleMetadata.TYPE, + new SnapshotLifecycleMetadata(policyMetadataMap, mode, new SnapshotLifecycleStats()) + ) .build(); return ClusterState.builder(new ClusterName("cluster")).metadata(metadata).build(); } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java index 71346ebc495d4..f940764d6a6f8 100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/action/ReservedSnapshotLifecycleStateServiceTests.java @@ -125,7 +125,7 @@ public void testActionAddRemove() throws Exception { List repositoriesMetadata = List.of(new RepositoryMetadata("repo", "fs", Settings.EMPTY)); Metadata.Builder mdBuilder = Metadata.builder(); - mdBuilder.putCustom(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata)); + mdBuilder.putSection(RepositoriesMetadata.TYPE, new RepositoriesMetadata(repositoriesMetadata)); ClusterState state = ClusterState.builder(clusterName).metadata(mdBuilder).build(); ReservedSnapshotAction action = new ReservedSnapshotAction(); @@ -175,7 +175,7 @@ public void testActionAddRemove() throws Exception { prevState = updatedState; updatedState = processJSON(action, prevState, twoPoliciesJSON); assertThat(updatedState.keys(), containsInAnyOrder("daily-snapshots", "daily-snapshots1")); - SnapshotLifecycleMetadata slmMetadata = updatedState.state().metadata().custom(SnapshotLifecycleMetadata.TYPE); + SnapshotLifecycleMetadata slmMetadata = updatedState.state().metadata().section(SnapshotLifecycleMetadata.TYPE); assertThat(slmMetadata.getSnapshotConfigurations().keySet(), containsInAnyOrder("daily-snapshots", "daily-snapshots1")); String onePolicyRemovedJSON = """ @@ -200,7 +200,7 @@ public void testActionAddRemove() throws Exception { prevState = updatedState; updatedState = processJSON(action, prevState, onePolicyRemovedJSON); assertThat(updatedState.keys(), containsInAnyOrder("daily-snapshots")); - slmMetadata = updatedState.state().metadata().custom(SnapshotLifecycleMetadata.TYPE); + slmMetadata = updatedState.state().metadata().section(SnapshotLifecycleMetadata.TYPE); assertThat(slmMetadata.getSnapshotConfigurations().keySet(), containsInAnyOrder("daily-snapshots")); String onePolicyRenamedJSON = """ @@ -225,7 +225,7 @@ public void testActionAddRemove() throws Exception { prevState = updatedState; updatedState = processJSON(action, prevState, onePolicyRenamedJSON); assertThat(updatedState.keys(), containsInAnyOrder("daily-snapshots-2")); - slmMetadata = updatedState.state().metadata().custom(SnapshotLifecycleMetadata.TYPE); + slmMetadata = updatedState.state().metadata().section(SnapshotLifecycleMetadata.TYPE); assertThat(slmMetadata.getSnapshotConfigurations().keySet(), containsInAnyOrder("daily-snapshots-2")); } diff --git a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java index d5a8faea1c0a0..901d7f4ad396e 
100644 --- a/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java +++ b/x-pack/plugin/slm/src/test/java/org/elasticsearch/xpack/slm/history/SnapshotLifecycleTemplateRegistryTests.java @@ -440,7 +440,7 @@ private ClusterState createClusterState( Metadata.builder() .indexTemplates(indexTemplates) .transientSettings(nodeSettings) - .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) + .putSection(IndexLifecycleMetadata.TYPE, ilmMeta) .build() ) .blocks(new ClusterBlocks.Builder().build()) diff --git a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java index 25ff3b5311fa2..58d790ca891b1 100644 --- a/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java +++ b/x-pack/plugin/stack/src/test/java/org/elasticsearch/xpack/stack/StackTemplateRegistryTests.java @@ -684,8 +684,8 @@ private ClusterState createClusterState( Metadata.builder() .componentTemplates(componentTemplates) .transientSettings(nodeSettings) - .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta) - .putCustom(IngestMetadata.TYPE, ingestMetadata) + .putSection(IndexLifecycleMetadata.TYPE, ilmMeta) + .putSection(IngestMetadata.TYPE, ingestMetadata) .build() ) .blocks(new ClusterBlocks.Builder().build()) diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java index 8ee23e38f9ffe..9cb814c99b72a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/TransformUsageTransportAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.protocol.xpack.XPackUsageRequest; import org.elasticsearch.search.aggregations.AggregationBuilders; import org.elasticsearch.search.aggregations.InternalAggregations; @@ -99,10 +99,10 @@ protected void masterOperation( ClusterState clusterState, ActionListener listener ) { - Collection> transformTasks = TransformTask.findAllTransformTasks(clusterState); + Collection> transformTasks = TransformTask.findAllTransformTasks(clusterState); final int taskCount = transformTasks.size(); final Map transformsCountByState = new HashMap<>(); - for (PersistentTasksCustomMetadata.PersistentTask transformTask : transformTasks) { + for (PersistentTasksMetadataSection.PersistentTask transformTask : transformTasks) { TransformState transformState = (TransformState) transformTask.getState(); Optional.ofNullable(transformState) .map(TransformState::getTaskState) diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransformUpdater.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransformUpdater.java index 935ff04c47d85..baa6cc1904b4f 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransformUpdater.java +++ 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransformUpdater.java @@ -20,7 +20,7 @@ import org.elasticsearch.core.Nullable; import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.engine.VersionConflictEngineException; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.xpack.core.ClientHelper; import org.elasticsearch.xpack.core.XPackSettings; import org.elasticsearch.xpack.core.security.SecurityContext; @@ -343,7 +343,7 @@ private static void updateTransformConfiguration( config.getSource().getIndex() ); // If we are running, we should verify that the destination index exists and create it if it does not - if (PersistentTasksCustomMetadata.getTaskWithId(clusterState, config.getId()) != null && dest.length == 0 + if (PersistentTasksMetadataSection.getTaskWithId(clusterState, config.getId()) != null && dest.length == 0 // Verify we have source indices. The user could defer_validations and if the task is already running // we allow source indices to disappear. If the source and destination indices do not exist, don't do anything // the transform will just have to dynamically create the destination index without special mapping. diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformAction.java index cb2985b5b1b3a..7d4daccae08e2 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformAction.java @@ -17,7 +17,7 @@ import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.search.builder.SearchSourceBuilder; import org.elasticsearch.search.sort.SortOrder; import org.elasticsearch.tasks.Task; @@ -76,12 +76,12 @@ protected void doExecute(Task task, Request request, ActionListener li // Step 2: Search for all the transform tasks (matching the request) that *do not* have corresponding transform config. 
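// ---------------------------------------------------------------------------------------------
// [Editor's illustrative sketch, not part of the patch.] The two renamed persistent-task lookup
// styles used throughout the transform changes: the static getTaskWithId(...) helper and reading
// the whole tasks section via metadata().section(TYPE). Both calls appear elsewhere in this diff;
// the class/method names here are hypothetical.
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.persistent.PersistentTasksMetadataSection;

final class TransformTaskLookupExample {
    // Style 1: static lookup by task id (was PersistentTasksCustomMetadata.getTaskWithId).
    static boolean hasTask(ClusterState state, String transformId) {
        return PersistentTasksMetadataSection.getTaskWithId(state, transformId) != null;
    }

    // Style 2: read the whole tasks section (was metadata().custom(...)) and query it.
    static boolean hasTaskViaSection(ClusterState state, String transformId) {
        PersistentTasksMetadataSection tasks = state.metadata().section(PersistentTasksMetadataSection.TYPE);
        return tasks != null && tasks.getTask(transformId) != null;
    }
}
// ---------------------------------------------------------------------------------------------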
ActionListener> searchTransformConfigsListener = ActionListener.wrap(r -> { Set transformConfigIds = r.results().stream().map(TransformConfig::getId).collect(toSet()); - Collection> transformTasks = TransformTask.findTransformTasks( + Collection> transformTasks = TransformTask.findTransformTasks( request.getId(), clusterState ); List errors = transformTasks.stream() - .map(PersistentTasksCustomMetadata.PersistentTask::getId) + .map(PersistentTasksMetadataSection.PersistentTask::getId) .filter(not(transformConfigIds::contains)) .map(transformId -> new Response.Error("dangling_task", Strings.format(DANGLING_TASK_ERROR_MESSAGE_FORMAT, transformId))) .collect(toList()); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java index ae0eb000e70e5..57b6be3f843e3 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportGetTransformStatsAction.java @@ -29,8 +29,8 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; @@ -205,7 +205,8 @@ protected void doExecute(Task task, Request request, ActionListener fi ); ActionListener doExecuteListener = ActionListener.wrap(response -> { - PersistentTasksCustomMetadata tasksInProgress = clusterState.getMetadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasksInProgress = clusterState.getMetadata() + .section(PersistentTasksMetadataSection.TYPE); if (tasksInProgress != null) { // Mutates underlying state object with the assigned node attributes response.getTransformsStats().forEach(dtsasi -> setNodeAttributes(dtsasi, tasksInProgress, clusterState)); @@ -249,7 +250,7 @@ protected void doExecute(Task task, Request request, ActionListener fi private static void setNodeAttributes( TransformStats transformStats, - PersistentTasksCustomMetadata persistentTasksCustomMetadata, + PersistentTasksMetadataSection persistentTasksCustomMetadata, ClusterState clusterState ) { var pTask = persistentTasksCustomMetadata.getTask(transformStats.getId()); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java index 35c97f50b6824..39d0831060b52 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportPutTransformAction.java @@ -24,7 +24,7 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; -import 
org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.Task; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; @@ -99,7 +99,7 @@ protected void masterOperation(Task task, Request request, ClusterState clusterS String transformId = config.getId(); // quick check whether a transform has already been created under that name - if (PersistentTasksCustomMetadata.getTaskWithId(clusterState, transformId) != null) { + if (PersistentTasksMetadataSection.getTaskWithId(clusterState, transformId) != null) { listener.onFailure( new ResourceAlreadyExistsException(TransformMessages.getMessage(TransformMessages.REST_PUT_TRANSFORM_EXISTS, transformId)) ); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportScheduleNowTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportScheduleNowTransformAction.java index ee82c28913143..8a6def9c23a1b 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportScheduleNowTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportScheduleNowTransformAction.java @@ -20,7 +20,7 @@ import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.transport.ActionNotFoundTransportException; @@ -74,7 +74,7 @@ protected void doExecute(Task task, Request request, ActionListener li XPackPlugin.checkReadyForXPackCustomMetadata(clusterState); ActionListener getTransformListener = ActionListener.wrap(unusedConfig -> { - PersistentTasksCustomMetadata.PersistentTask transformTask = TransformTask.getTransformTask(request.getId(), clusterState); + PersistentTasksMetadataSection.PersistentTask transformTask = TransformTask.getTransformTask(request.getId(), clusterState); // to send a request to schedule now the transform at runtime, several requirements must be met: // - transform must be running, meaning a task exists diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportSetTransformResetModeAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportSetTransformResetModeAction.java index b5b50ec6d3997..05976c61ed5c4 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportSetTransformResetModeAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportSetTransformResetModeAction.java @@ -47,11 +47,11 @@ protected ClusterState setState(ClusterState oldState, SetResetModeActionRequest ClusterState.Builder newState = ClusterState.builder(oldState); if (request.shouldDeleteMetadata()) { assert request.isEnabled() == false; // SetResetModeActionRequest should have enforced this - newState.metadata(Metadata.builder(oldState.getMetadata()).removeCustom(TransformMetadata.TYPE).build()); + newState.metadata(Metadata.builder(oldState.getMetadata()).removeSection(TransformMetadata.TYPE).build()); } else { - TransformMetadata.Builder builder = 
TransformMetadata.Builder.from(oldState.metadata().custom(TransformMetadata.TYPE)) + TransformMetadata.Builder builder = TransformMetadata.Builder.from(oldState.metadata().section(TransformMetadata.TYPE)) .isResetMode(request.isEnabled()); - newState.metadata(Metadata.builder(oldState.getMetadata()).putCustom(TransformMetadata.TYPE, builder.build()).build()); + newState.metadata(Metadata.builder(oldState.getMetadata()).putSection(TransformMetadata.TYPE, builder.build()).build()); } return newState.build(); } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java index 4ad56bf3a661a..3bcb5ca0749d0 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStartTransformAction.java @@ -28,7 +28,7 @@ import org.elasticsearch.core.TimeValue; import org.elasticsearch.health.HealthStatus; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.Task; @@ -141,7 +141,7 @@ protected void masterOperation( var parentClient = new ParentTaskAssigningClient(client, parentTaskId); // <5> Wait for the allocated task's state to STARTED - ActionListener> newPersistentTaskActionListener = ActionListener + ActionListener> newPersistentTaskActionListener = ActionListener .wrap(t -> { TransformTaskParams transformTask = transformTaskParamsHolder.get(); assert transformTask != null; @@ -157,7 +157,7 @@ protected void masterOperation( ActionListener createOrGetIndexListener = ActionListener.wrap(unused -> { TransformTaskParams transformTask = transformTaskParamsHolder.get(); assert transformTask != null; - PersistentTasksCustomMetadata.PersistentTask existingTask = TransformTask.getTransformTask(transformTask.getId(), state); + PersistentTasksMetadataSection.PersistentTask existingTask = TransformTask.getTransformTask(transformTask.getId(), state); if (existingTask == null) { // Create the allocated task and wait for it to be started persistentTasksService.sendStartRequest( @@ -294,7 +294,7 @@ protected ClusterBlockException checkBlock(StartTransformAction.Request request, private void cancelTransformTask(String taskId, String transformId, Exception exception, Consumer onFailure) { persistentTasksService.sendRemoveRequest(taskId, null, new ActionListener<>() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask task) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask task) { // We succeeded in canceling the persistent task, but the // problem that caused us to cancel it is the overall result onFailure.accept(exception); @@ -329,7 +329,7 @@ private void waitForTransformTaskStarted( timeout, new PersistentTasksService.WaitForPersistentTaskListener() { @Override - public void onResponse(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public void onResponse(PersistentTasksMetadataSection.PersistentTask persistentTask) { if (predicate.exception != null) { // We want to return to the caller without leaving an unassigned persistent task 
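// ---------------------------------------------------------------------------------------------
// [Editor's illustrative sketch, not part of the patch.] How the renamed Assignment type is
// typically inspected, mirroring the TransformPredicate further down in this file's diff; only
// members visible in the diff (getAssignment, isAssigned, INITIAL_ASSIGNMENT) are used, the
// wildcard type parameter and the helper name are assumptions.
import org.elasticsearch.persistent.PersistentTasksMetadataSection;

final class TransformAssignmentExample {
    // True when the task has left the initial placeholder assignment but still has no node.
    static boolean assignmentLooksFailed(PersistentTasksMetadataSection.PersistentTask<?> task) {
        if (task == null) {
            return false;
        }
        PersistentTasksMetadataSection.Assignment assignment = task.getAssignment();
        return assignment != null
            && assignment.equals(PersistentTasksMetadataSection.INITIAL_ASSIGNMENT) == false
            && assignment.isAssigned() == false;
    }
}
// ---------------------------------------------------------------------------------------------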
cancelTransformTask(taskId, params.getId(), predicate.exception, listener::onFailure); @@ -362,18 +362,18 @@ public void onTimeout(TimeValue timeout) { * Important: the methods of this class must NOT throw exceptions. If they did then the callers * of endpoints waiting for a condition tested by this predicate would never get a response. */ - private static class TransformPredicate implements Predicate> { + private static class TransformPredicate implements Predicate> { private volatile Exception exception; @Override - public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTask) { + public boolean test(PersistentTasksMetadataSection.PersistentTask persistentTask) { if (persistentTask == null) { return false; } - PersistentTasksCustomMetadata.Assignment assignment = persistentTask.getAssignment(); + PersistentTasksMetadataSection.Assignment assignment = persistentTask.getAssignment(); if (assignment != null - && assignment.equals(PersistentTasksCustomMetadata.INITIAL_ASSIGNMENT) == false + && assignment.equals(PersistentTasksMetadataSection.INITIAL_ASSIGNMENT) == false && assignment.isAssigned() == false) { // For some reason, the task is not assigned to a node, but is no longer in the `INITIAL_ASSIGNMENT` state // Consider this a failure. @@ -390,7 +390,7 @@ public boolean test(PersistentTasksCustomMetadata.PersistentTask persistentTa // checking for `isNotStopped` as the state COULD be marked as failed for any number of reasons // But if it is in a failed state, _stats will show as much and give good reason to the user. // If it is not able to be assigned to a node all together, we should just close the task completely - private static boolean isNotStopped(PersistentTasksCustomMetadata.PersistentTask task) { + private static boolean isNotStopped(PersistentTasksMetadataSection.PersistentTask task) { TransformState state = (TransformState) task.getState(); return state != null && state.getTaskState().equals(TransformTaskState.STOPPED) == false; } diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java index 34e89986b5bcd..8f65f71cdf0cf 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportStopTransformAction.java @@ -29,8 +29,8 @@ import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.index.IndexNotFoundException; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.persistent.PersistentTasksService; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.tasks.CancellableTask; @@ -95,12 +95,12 @@ public TransportStopTransformAction( } static void validateTaskState(ClusterState state, List transformIds, boolean isForce) { - PersistentTasksCustomMetadata tasks = state.metadata().custom(PersistentTasksCustomMetadata.TYPE); + PersistentTasksMetadataSection tasks = state.metadata().section(PersistentTasksMetadataSection.TYPE); if (isForce == false && tasks != null) { 
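// ---------------------------------------------------------------------------------------------
// [Editor's illustrative sketch, not part of the patch.] Building a cluster state that carries
// the renamed tasks section, in the way the test changes later in this patch exercise
// validateTaskState; the builder calls mirror those in the test diff below, while the class and
// method names and the literal cluster name are assumptions.
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.persistent.PersistentTasksMetadataSection;

final class TasksSectionTestStateExample {
    // Wraps an already-built tasks section in a minimal cluster state.
    static ClusterState stateWithTasks(PersistentTasksMetadataSection tasks) {
        Metadata metadata = Metadata.builder()
            .putSection(PersistentTasksMetadataSection.TYPE, tasks) // was putCustom(...)
            .build();
        return ClusterState.builder(new ClusterName("_name")).metadata(metadata).build();
    }
}
// ---------------------------------------------------------------------------------------------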
List failedTasks = new ArrayList<>(); List failedReasons = new ArrayList<>(); for (String transformId : transformIds) { - PersistentTasksCustomMetadata.PersistentTask dfTask = tasks.getTask(transformId); + PersistentTasksMetadataSection.PersistentTask dfTask = tasks.getTask(transformId); if (dfTask != null && dfTask.getState() instanceof TransformState && ((TransformState) dfTask.getState()).getTaskState() == TransformTaskState.FAILED) { @@ -383,7 +383,7 @@ private void waitForTransformStopped( return true; } for (String persistentTaskId : persistentTaskIds) { - PersistentTasksCustomMetadata.PersistentTask transformsTask = persistentTasksCustomMetadata.getTask(persistentTaskId); + PersistentTasksMetadataSection.PersistentTask transformsTask = persistentTasksCustomMetadata.getTask(persistentTaskId); // Either the task has successfully stopped or we have seen that it has failed if (transformsTask == null || exceptions.containsKey(persistentTaskId)) { continue; @@ -435,9 +435,9 @@ private void waitForTransformStopped( }, e -> { // waitForPersistentTasksCondition throws a IllegalStateException on timeout if (e instanceof IllegalStateException && e.getMessage().startsWith("Timed out")) { - PersistentTasksCustomMetadata persistentTasksCustomMetadata = clusterService.state() + PersistentTasksMetadataSection persistentTasksCustomMetadata = clusterService.state() .metadata() - .custom(PersistentTasksCustomMetadata.TYPE); + .section(PersistentTasksMetadataSection.TYPE); if (persistentTasksCustomMetadata == null) { listener.onResponse(new Response(Boolean.TRUE)); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java index 6251dab36b2ca..85cea319e8376 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/action/TransportUpdateTransformAction.java @@ -25,7 +25,7 @@ import org.elasticsearch.common.util.concurrent.EsExecutors; import org.elasticsearch.discovery.MasterNotDiscoveredException; import org.elasticsearch.injection.guice.Inject; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.CancellableTask; import org.elasticsearch.tasks.Task; import org.elasticsearch.threadpool.ThreadPool; @@ -160,7 +160,7 @@ protected void doExecute(Task task, Request request, ActionListener li boolean updateChangesSettings = update.changesSettings(originalConfig); boolean updateChangesHeaders = update.changesHeaders(originalConfig); if (updateChangesSettings || updateChangesHeaders) { - PersistentTasksCustomMetadata.PersistentTask transformTask = TransformTask.getTransformTask( + PersistentTasksMetadataSection.PersistentTask transformTask = TransformTask.getTransformTask( request.getId(), clusterState ); diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformHealthChecker.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformHealthChecker.java index 24c5d45a38f75..dffb3e2b86008 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformHealthChecker.java +++ 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformHealthChecker.java @@ -11,7 +11,7 @@ import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.core.Nullable; import org.elasticsearch.health.HealthStatus; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; import org.elasticsearch.xpack.core.transform.transforms.AuthorizationState; import org.elasticsearch.xpack.core.transform.transforms.TransformHealth; import org.elasticsearch.xpack.core.transform.transforms.TransformHealthIssue; diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformNodes.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformNodes.java index 56e5fd5900cfb..384d03860d955 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformNodes.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformNodes.java @@ -16,9 +16,9 @@ import org.elasticsearch.common.io.stream.Writeable; import org.elasticsearch.common.logging.HeaderWarning; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment; +import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask; import org.elasticsearch.transport.TransportRequest; import org.elasticsearch.transport.TransportResponse; import org.elasticsearch.transport.TransportResponseHandler; @@ -56,8 +56,11 @@ public static TransformNodeAssignments transformTaskNodes(List transform Set transformIdsSet = new HashSet<>(transformIds); - Collection> tasks = TransformTask.findTransformTasks(transformIdsSet, clusterState); - for (PersistentTasksCustomMetadata.PersistentTask task : tasks) { + Collection> tasks = TransformTask.findTransformTasks( + transformIdsSet, + clusterState + ); + for (PersistentTasksMetadataSection.PersistentTask task : tasks) { if (task.isAssigned()) { executorNodes.add(task.getExecutorNode()); assigned.add(task.getId()); @@ -87,8 +90,8 @@ public static TransformNodeAssignments findPersistentTasks(String transformId, C Set assigned = new HashSet<>(); Set waitingForAssignment = new HashSet<>(); - Collection> tasks = TransformTask.findTransformTasks(transformId, clusterState); - for (PersistentTasksCustomMetadata.PersistentTask task : tasks) { + Collection> tasks = TransformTask.findTransformTasks(transformId, clusterState); + for (PersistentTasksMetadataSection.PersistentTask task : tasks) { if (task.isAssigned()) { executorNodes.add(task.getExecutorNode()); assigned.add(task.getId()); @@ -114,7 +117,7 @@ public static Assignment getAssignment(String transformId, ClusterState clusterS return task.getAssignment(); } - return PersistentTasksCustomMetadata.INITIAL_ASSIGNMENT; + return PersistentTasksMetadataSection.INITIAL_ASSIGNMENT; } /** diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java 
b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java index 279c59b8b712d..a5fd4aec0122a 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutor.java @@ -29,8 +29,8 @@ import org.elasticsearch.core.Tuple; import org.elasticsearch.persistent.AllocatedPersistentTask; import org.elasticsearch.persistent.PersistentTaskState; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; import org.elasticsearch.persistent.PersistentTasksExecutor; +import org.elasticsearch.persistent.PersistentTasksMetadataSection; import org.elasticsearch.tasks.TaskId; import org.elasticsearch.threadpool.ThreadPool; import org.elasticsearch.xpack.core.indexing.IndexerState; @@ -105,7 +105,7 @@ public TransformPersistentTasksExecutor( } @Override - public PersistentTasksCustomMetadata.Assignment getAssignment( + public PersistentTasksMetadataSection.Assignment getAssignment( TransformTaskParams params, Collection candidateNodes, ClusterState clusterState @@ -118,7 +118,7 @@ public PersistentTasksCustomMetadata.Assignment getAssignment( * Operations on the transform node happen in {@link #nodeOperation()} */ if (TransformMetadata.getTransformMetadata(clusterState).isResetMode()) { - return new PersistentTasksCustomMetadata.Assignment( + return new PersistentTasksMetadataSection.Assignment( null, "Transform task will not be assigned as a feature reset is in progress." ); @@ -132,7 +132,7 @@ public PersistentTasksCustomMetadata.Assignment getAssignment( + String.join(",", unavailableIndices) + "]"; logger.debug(reason); - return new PersistentTasksCustomMetadata.Assignment(null, reason); + return new PersistentTasksMetadataSection.Assignment(null, reason); } DiscoveryNode discoveryNode = selectLeastLoadedNode( clusterState, @@ -152,10 +152,10 @@ public PersistentTasksCustomMetadata.Assignment getAssignment( + "]"; logger.debug(reason); - return new PersistentTasksCustomMetadata.Assignment(null, reason); + return new PersistentTasksMetadataSection.Assignment(null, reason); } - return new PersistentTasksCustomMetadata.Assignment(discoveryNode.getId(), ""); + return new PersistentTasksMetadataSection.Assignment(discoveryNode.getId(), ""); } static List verifyIndicesPrimaryShardsAreActive(ClusterState clusterState, IndexNameExpressionResolver resolver) { @@ -484,7 +484,7 @@ protected AllocatedPersistentTask createTask( String type, String action, TaskId parentTaskId, - PersistentTasksCustomMetadata.PersistentTask persistentTask, + PersistentTasksMetadataSection.PersistentTask persistentTask, Map headers ) { return new TransformTask( diff --git a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java index 7c9a22aa9fbfe..e0d35469aaf29 100644 --- a/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java +++ b/x-pack/plugin/transform/src/main/java/org/elasticsearch/xpack/transform/transforms/TransformTask.java @@ -20,8 +20,8 @@ import org.elasticsearch.core.Predicates; import org.elasticsearch.core.TimeValue; import org.elasticsearch.persistent.AllocatedPersistentTask; -import org.elasticsearch.persistent.PersistentTasksCustomMetadata; -import 
+import org.elasticsearch.persistent.PersistentTasksMetadataSection;
+import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask;
 import org.elasticsearch.persistent.PersistentTasksService;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.tasks.TaskId;
@@ -634,7 +634,7 @@ public static Collection<PersistentTask<?>> findTransformTasks(Set<String> trans
     }
 
     public static Collection<PersistentTask<?>> findTransformTasks(String transformIdPattern, ClusterState clusterState) {
-        Predicate<PersistentTask<?>> taskMatcher = transformIdPattern == null
+        Predicate<PersistentTask<?>> taskMatcher = transformIdPattern == null || Strings.isAllOrWildcard(transformIdPattern)
             ? Predicates.always()
             : t -> {
                 TransformTaskParams transformParams = (TransformTaskParams) t.getParams();
                 return Regex.simpleMatch(transformIdPattern, transformParams.getId());
@@ -648,7 +648,7 @@ public TransformContext getContext() {
     }
 
     private static Collection<PersistentTask<?>> findTransformTasks(Predicate<PersistentTask<?>> predicate, ClusterState clusterState) {
-        PersistentTasksCustomMetadata pTasksMeta = PersistentTasksCustomMetadata.getPersistentTasksCustomMetadata(clusterState);
+        PersistentTasksMetadataSection pTasksMeta = PersistentTasksMetadataSection.getPersistentTasksCustomMetadata(clusterState);
         if (pTasksMeta == null) {
             return Collections.emptyList();
         }
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformNodeTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformNodeTests.java
index 3df566fd70047..19c86a3658d53 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformNodeTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/TransformNodeTests.java
@@ -103,7 +103,7 @@ private Supplier<Optional<ClusterState>> clusterState(String nodeId) {
         }
 
         var state = ClusterState.builder(ClusterName.DEFAULT)
-            .metadata(Metadata.builder().putCustom(NodesShutdownMetadata.TYPE, nodesShutdownMetadata).build())
+            .metadata(Metadata.builder().putSection(NodesShutdownMetadata.TYPE, nodesShutdownMetadata).build())
             .nodes(nodes)
             .build();
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java
index 08e0982b2ab84..d01cb6c05cf98 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/action/TransportStopTransformActionTests.java
@@ -19,7 +19,7 @@
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.persistent.PersistentTaskResponse;
-import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.persistent.PersistentTasksMetadataSection;
 import org.elasticsearch.persistent.PersistentTasksService;
 import org.elasticsearch.persistent.RemovePersistentTaskAction;
 import org.elasticsearch.rest.RestStatus;
@@ -55,8 +55,8 @@ public class TransportStopTransformActionTests extends ESTestCase {
 
-    private Metadata.Builder buildMetadata(PersistentTasksCustomMetadata ptasks) {
-        return Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, ptasks);
+    private Metadata.Builder buildMetadata(PersistentTasksMetadataSection ptasks) {
+        return Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, ptasks);
     }
 
     public void testTaskStateValidationWithNoTasks() {
@@ -64,19 +64,19 @@ public void testTaskStateValidationWithNoTasks() {
         ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")).metadata(metadata);
         TransportStopTransformAction.validateTaskState(csBuilder.build(), List.of("non-failed-task"), false);
 
-        PersistentTasksCustomMetadata.Builder pTasksBuilder = PersistentTasksCustomMetadata.builder();
+        PersistentTasksMetadataSection.Builder pTasksBuilder = PersistentTasksMetadataSection.builder();
         csBuilder = ClusterState.builder(new ClusterName("_name")).metadata(buildMetadata(pTasksBuilder.build()));
         TransportStopTransformAction.validateTaskState(csBuilder.build(), List.of("non-failed-task"), false);
     }
 
     public void testTaskStateValidationWithTransformTasks() {
         // Test with the task state being null
-        PersistentTasksCustomMetadata.Builder pTasksBuilder = PersistentTasksCustomMetadata.builder()
+        PersistentTasksMetadataSection.Builder pTasksBuilder = PersistentTasksMetadataSection.builder()
             .addTask(
                 "non-failed-task",
                 TransformTaskParams.NAME,
                 new TransformTaskParams("transform-task-1", TransformConfigVersion.CURRENT, null, false),
-                new PersistentTasksCustomMetadata.Assignment("current-data-node-with-1-tasks", "")
+                new PersistentTasksMetadataSection.Assignment("current-data-node-with-1-tasks", "")
             );
         ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")).metadata(buildMetadata(pTasksBuilder.build()));
@@ -96,7 +96,7 @@ public void testTaskStateValidationWithTransformTasks() {
                 "failed-task",
                 TransformTaskParams.NAME,
                 new TransformTaskParams("transform-task-1", TransformConfigVersion.CURRENT, null, false),
-                new PersistentTasksCustomMetadata.Assignment("current-data-node-with-1-tasks", "")
+                new PersistentTasksMetadataSection.Assignment("current-data-node-with-1-tasks", "")
             )
             .updateTaskState(
                 "failed-task",
@@ -128,7 +128,7 @@ public void testTaskStateValidationWithTransformTasks() {
                 "failed-task-2",
                 TransformTaskParams.NAME,
                 new TransformTaskParams("transform-task-2", TransformConfigVersion.CURRENT, null, false),
-                new PersistentTasksCustomMetadata.Assignment("current-data-node-with-2-tasks", "")
+                new PersistentTasksMetadataSection.Assignment("current-data-node-with-2-tasks", "")
             )
             .updateTaskState(
                 "failed-task-2",
@@ -292,7 +292,7 @@ private static Answer withResponse() {
         return invocationOnMock -> {
             @SuppressWarnings("unchecked")
             var l = (ActionListener) invocationOnMock.getArguments()[2];
-            l.onResponse(new PersistentTaskResponse((PersistentTasksCustomMetadata.PersistentTask) null));
+            l.onResponse(new PersistentTaskResponse((PersistentTasksMetadataSection.PersistentTask) null));
             return null;
         };
     }
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformNodesTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformNodesTests.java
index 0f492d4250bc1..49fd05fc46782 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformNodesTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformNodesTests.java
@@ -20,7 +20,7 @@
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.persistent.PersistentTaskParams;
-import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
+import org.elasticsearch.persistent.PersistentTasksMetadataSection;
 import org.elasticsearch.test.ESTestCase;
 import org.elasticsearch.xcontent.XContentBuilder;
 import org.elasticsearch.xpack.core.transform.TransformConfigVersion;
@@ -34,7 +34,7 @@
 import static org.elasticsearch.cluster.node.DiscoveryNodeRole.REMOTE_CLUSTER_CLIENT_ROLE;
 import static org.elasticsearch.cluster.node.DiscoveryNodeRole.TRANSFORM_ROLE;
-import static org.elasticsearch.persistent.PersistentTasksCustomMetadata.INITIAL_ASSIGNMENT;
+import static org.elasticsearch.persistent.PersistentTasksMetadataSection.INITIAL_ASSIGNMENT;
 import static org.elasticsearch.test.hamcrest.OptionalMatchers.isEmpty;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.is;
@@ -51,18 +51,18 @@ public void testTransformNodes() {
         String transformIdOther = "df-id-other";
         String transformIdStopped = "df-id-stopped";
 
-        PersistentTasksCustomMetadata.Builder tasksBuilder = PersistentTasksCustomMetadata.builder();
+        PersistentTasksMetadataSection.Builder tasksBuilder = PersistentTasksMetadataSection.builder();
         tasksBuilder.addTask(
             transformIdFoo,
             TransformField.TASK_NAME,
             new TransformTaskParams(transformIdFoo, TransformConfigVersion.CURRENT, null, false),
-            new PersistentTasksCustomMetadata.Assignment("node-1", "test assignment")
+            new PersistentTasksMetadataSection.Assignment("node-1", "test assignment")
         );
         tasksBuilder.addTask(
             transformIdBar,
             TransformField.TASK_NAME,
             new TransformTaskParams(transformIdBar, TransformConfigVersion.CURRENT, null, false),
-            new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment")
+            new PersistentTasksMetadataSection.Assignment("node-2", "test assignment")
         );
         tasksBuilder.addTask("test-task1", "testTasks", new PersistentTaskParams() {
             @Override
@@ -84,28 +84,28 @@ public void writeTo(StreamOutput out) {
             public XContentBuilder toXContent(XContentBuilder builder, Params params) {
                 return null;
             }
-        }, new PersistentTasksCustomMetadata.Assignment("node-3", "test assignment"));
+        }, new PersistentTasksMetadataSection.Assignment("node-3", "test assignment"));
         tasksBuilder.addTask(
             transformIdFailed,
             TransformField.TASK_NAME,
             new TransformTaskParams(transformIdFailed, TransformConfigVersion.CURRENT, null, false),
-            new PersistentTasksCustomMetadata.Assignment(null, "awaiting reassignment after node loss")
+            new PersistentTasksMetadataSection.Assignment(null, "awaiting reassignment after node loss")
         );
         tasksBuilder.addTask(
             transformIdBaz,
             TransformField.TASK_NAME,
             new TransformTaskParams(transformIdBaz, TransformConfigVersion.CURRENT, null, false),
-            new PersistentTasksCustomMetadata.Assignment("node-2", "test assignment")
+            new PersistentTasksMetadataSection.Assignment("node-2", "test assignment")
         );
         tasksBuilder.addTask(
             transformIdOther,
             TransformField.TASK_NAME,
             new TransformTaskParams(transformIdOther, TransformConfigVersion.CURRENT, null, false),
-            new PersistentTasksCustomMetadata.Assignment("node-3", "test assignment")
+            new PersistentTasksMetadataSection.Assignment("node-3", "test assignment")
         );
 
         ClusterState cs = ClusterState.builder(new ClusterName("_name"))
-            .metadata(Metadata.builder().putCustom(PersistentTasksCustomMetadata.TYPE, tasksBuilder.build()))
+            .metadata(Metadata.builder().putSection(PersistentTasksMetadataSection.TYPE, tasksBuilder.build()))
             .build();
 
         // don't ask for transformIdOther
@@ -282,16 +282,16 @@ public void testGetAssignment() {
             TimeValue.timeValueSeconds(10),
             false
         );
-        PersistentTasksCustomMetadata.Assignment assignment2 = new PersistentTasksCustomMetadata.Assignment(
+        PersistentTasksMetadataSection.Assignment assignment2 = new PersistentTasksMetadataSection.Assignment(
             randomAlphaOfLengthBetween(1, 10),
             randomAlphaOfLengthBetween(1, 10)
         );
         ClusterState clusterState = ClusterState.builder(new ClusterName("some-cluster"))
             .metadata(
                 Metadata.builder()
-                    .putCustom(
-                        PersistentTasksCustomMetadata.TYPE,
-                        PersistentTasksCustomMetadata.builder()
+                    .putSection(
+                        PersistentTasksMetadataSection.TYPE,
+                        PersistentTasksMetadataSection.builder()
                             .addTask("transform-1", TransformTaskParams.NAME, transformTaskParams1, null)
                             .addTask("transform-2", TransformTaskParams.NAME, transformTaskParams2, assignment2)
                             .build()
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java
index 07801221adc3b..d3b7d2f97634a 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformPersistentTasksExecutorTests.java
@@ -31,8 +31,8 @@
 import org.elasticsearch.index.IndexVersion;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.TestIndexNameExpressionResolver;
-import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
-import org.elasticsearch.persistent.PersistentTasksCustomMetadata.Assignment;
+import org.elasticsearch.persistent.PersistentTasksMetadataSection;
+import org.elasticsearch.persistent.PersistentTasksMetadataSection.Assignment;
 import org.elasticsearch.tasks.TaskId;
 import org.elasticsearch.tasks.TaskManager;
 import org.elasticsearch.test.ESTestCase;
@@ -513,28 +513,28 @@ private ClusterState buildClusterState(DiscoveryNodes.Builder nodes) {
         Metadata.Builder metadata = Metadata.builder();
         RoutingTable.Builder routingTable = RoutingTable.builder();
         addIndices(metadata, routingTable);
-        PersistentTasksCustomMetadata.Builder pTasksBuilder = PersistentTasksCustomMetadata.builder()
+        PersistentTasksMetadataSection.Builder pTasksBuilder = PersistentTasksMetadataSection.builder()
             .addTask(
                 "transform-task-1",
                 TransformTaskParams.NAME,
                 new TransformTaskParams("transform-task-1", TransformConfigVersion.CURRENT, null, false),
-                new PersistentTasksCustomMetadata.Assignment("current-data-node-with-1-tasks", "")
+                new PersistentTasksMetadataSection.Assignment("current-data-node-with-1-tasks", "")
             )
             .addTask(
                 "transform-task-2",
                 TransformTaskParams.NAME,
                 new TransformTaskParams("transform-task-2", TransformConfigVersion.CURRENT, null, false),
-                new PersistentTasksCustomMetadata.Assignment("current-data-node-with-2-tasks", "")
+                new PersistentTasksMetadataSection.Assignment("current-data-node-with-2-tasks", "")
             )
             .addTask(
                 "transform-task-3",
                 TransformTaskParams.NAME,
                 new TransformTaskParams("transform-task-3", TransformConfigVersion.CURRENT, null, false),
-                new PersistentTasksCustomMetadata.Assignment("current-data-node-with-2-tasks", "")
+                new PersistentTasksMetadataSection.Assignment("current-data-node-with-2-tasks", "")
             );
-        PersistentTasksCustomMetadata pTasks = pTasksBuilder.build();
-        metadata.putCustom(PersistentTasksCustomMetadata.TYPE, pTasks);
+        PersistentTasksMetadataSection pTasks = pTasksBuilder.build();
+        metadata.putSection(PersistentTasksMetadataSection.TYPE, pTasks);
 
         ClusterState.Builder csBuilder = ClusterState.builder(new ClusterName("_name")).nodes(nodes);
         csBuilder.routingTable(routingTable.build());
diff --git a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java
index 31bd365250e3c..5b8c9f32d58ff 100644
--- a/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java
+++ b/x-pack/plugin/transform/src/test/java/org/elasticsearch/xpack/transform/transforms/TransformTaskTests.java
@@ -22,8 +22,8 @@
 import org.elasticsearch.core.TimeValue;
 import org.elasticsearch.health.HealthStatus;
 import org.elasticsearch.persistent.PersistentTaskParams;
-import org.elasticsearch.persistent.PersistentTasksCustomMetadata;
-import org.elasticsearch.persistent.PersistentTasksCustomMetadata.PersistentTask;
+import org.elasticsearch.persistent.PersistentTasksMetadataSection;
+import org.elasticsearch.persistent.PersistentTasksMetadataSection.PersistentTask;
 import org.elasticsearch.persistent.PersistentTasksService;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.tasks.TaskId;
@@ -362,9 +362,9 @@ public void testGetTransformTask() {
         ClusterState clusterState = ClusterState.builder(new ClusterName("some-cluster"))
             .metadata(
                 Metadata.builder()
-                    .putCustom(
-                        PersistentTasksCustomMetadata.TYPE,
-                        PersistentTasksCustomMetadata.builder()
+                    .putSection(
+                        PersistentTasksMetadataSection.TYPE,
+                        PersistentTasksMetadataSection.builder()
                             .addTask("other-1", "other", null, null)
                             .addTask("other-2", "other", null, null)
                             .addTask("other-3", "other", null, null)
@@ -382,9 +382,9 @@ public void testGetTransformTask() {
         ClusterState clusterState = ClusterState.builder(new ClusterName("some-cluster"))
             .metadata(
                 Metadata.builder()
-                    .putCustom(
-                        PersistentTasksCustomMetadata.TYPE,
-                        PersistentTasksCustomMetadata.builder()
+                    .putSection(
+                        PersistentTasksMetadataSection.TYPE,
+                        PersistentTasksMetadataSection.builder()
                             .addTask("transform-1", TransformTaskParams.NAME, transformTaskParams, null)
                             .addTask("other-1", "other", null, null)
                             .addTask("transform-2", TransformTaskParams.NAME, otherTaskParams, null)
@@ -418,9 +418,9 @@ public void testFindAllTransformTasks() {
         ClusterState clusterState = ClusterState.builder(new ClusterName("some-cluster"))
             .metadata(
                 Metadata.builder()
-                    .putCustom(
-                        PersistentTasksCustomMetadata.TYPE,
-                        PersistentTasksCustomMetadata.builder()
+                    .putSection(
+                        PersistentTasksMetadataSection.TYPE,
+                        PersistentTasksMetadataSection.builder()
                             .addTask("other-1", "other", null, null)
                             .addTask("other-2", "other", null, null)
                             .addTask("other-3", "other", null, null)
@@ -434,9 +434,9 @@ public void testFindAllTransformTasks() {
         ClusterState clusterState = ClusterState.builder(new ClusterName("some-cluster"))
             .metadata(
                 Metadata.builder()
-                    .putCustom(
-                        PersistentTasksCustomMetadata.TYPE,
-                        PersistentTasksCustomMetadata.builder()
+                    .putSection(
+                        PersistentTasksMetadataSection.TYPE,
+                        PersistentTasksMetadataSection.builder()
                             .addTask("transform-1", TransformTaskParams.NAME, null, null)
                             .addTask("other-1", "other", null, null)
                             .addTask("transform-2", TransformTaskParams.NAME, null, null)
@@ -458,9 +458,9 @@ public void testFindTransformTasks() {
         ClusterState clusterState = ClusterState.builder(new ClusterName("some-cluster"))
             .metadata(
                 Metadata.builder()
-                    .putCustom(
-                        PersistentTasksCustomMetadata.TYPE,
-                        PersistentTasksCustomMetadata.builder()
+                    .putSection(
+                        PersistentTasksMetadataSection.TYPE,
+                        PersistentTasksMetadataSection.builder()
                             .addTask("transform-1", TransformTaskParams.NAME, createTransformTaskParams("transform-1"), null)
                             .addTask("other-1", "other", null, null)
                             .addTask("transform-2", TransformTaskParams.NAME, createTransformTaskParams("transform-2"), null)
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java
index 5cff1db9dd174..c93df88fb39c7 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/Watcher.java
@@ -830,7 +830,7 @@ public String getFeatureName() {
     @Override
     public void prepareForIndicesMigration(ClusterService clusterService, Client client, ActionListener<Map<String, Object>> listener) {
         Client originClient = new OriginSettingClient(client, WATCHER_ORIGIN);
-        boolean manuallyStopped = Optional.ofNullable(clusterService.state().metadata().custom(WatcherMetadata.TYPE))
+        boolean manuallyStopped = Optional.ofNullable(clusterService.state().metadata().section(WatcherMetadata.TYPE))
             .map(WatcherMetadata::manuallyStopped)
             .orElse(false);
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java
index cd0e066de2eaf..c8e9dd902cb5e 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleService.java
@@ -193,7 +193,7 @@ private void pauseExecution(String reason) {
      * check if watcher has been stopped manually via the stop API
      */
     private static boolean isWatcherStoppedManually(ClusterState state) {
-        WatcherMetadata watcherMetadata = state.getMetadata().custom(WatcherMetadata.TYPE);
+        WatcherMetadata watcherMetadata = state.getMetadata().section(WatcherMetadata.TYPE);
         return watcherMetadata != null && watcherMetadata.manuallyStopped();
     }
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportWatcherServiceAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportWatcherServiceAction.java
index 9212780d11fd3..01bfe0129b3f3 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportWatcherServiceAction.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportWatcherServiceAction.java
@@ -87,14 +87,14 @@ public ClusterState execute(ClusterState clusterState) {
                 XPackPlugin.checkReadyForXPackCustomMetadata(clusterState);
 
                 WatcherMetadata newWatcherMetadata = new WatcherMetadata(manuallyStopped);
-                WatcherMetadata currentMetadata = clusterState.metadata().custom(WatcherMetadata.TYPE);
+                WatcherMetadata currentMetadata = clusterState.metadata().section(WatcherMetadata.TYPE);
 
                 // adhere to the contract of returning the original state if nothing has changed
                 if (newWatcherMetadata.equals(currentMetadata)) {
                     return clusterState;
                 } else {
                     ClusterState.Builder builder = new ClusterState.Builder(clusterState);
-                    builder.metadata(Metadata.builder(clusterState.getMetadata()).putCustom(WatcherMetadata.TYPE, newWatcherMetadata));
+                    builder.metadata(Metadata.builder(clusterState.getMetadata()).putSection(WatcherMetadata.TYPE, newWatcherMetadata));
                     return builder.build();
                 }
             }
diff --git a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportWatcherStatsAction.java b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportWatcherStatsAction.java
index 220415cf9d094..2752ad39bd205 100644
--- a/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportWatcherStatsAction.java
+++ b/x-pack/plugin/watcher/src/main/java/org/elasticsearch/xpack/watcher/transport/actions/TransportWatcherStatsAction.java
@@ -105,7 +105,7 @@ protected WatcherStatsResponse.Node nodeOperation(WatcherStatsRequest.Node reque
     }
 
     private WatcherMetadata getWatcherMetadata() {
-        WatcherMetadata watcherMetadata = clusterService.state().getMetadata().custom(WatcherMetadata.TYPE);
+        WatcherMetadata watcherMetadata = clusterService.state().getMetadata().section(WatcherMetadata.TYPE);
         if (watcherMetadata == null) {
             watcherMetadata = new WatcherMetadata(false);
         }
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java
index 365b072a418ef..212f17d28e7d6 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherLifeCycleServiceTests.java
@@ -154,7 +154,7 @@ public void testManualStartStop() {
             .put(indexMetadataBuilder)
            .put(IndexTemplateMetadata.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns()));
         if (randomBoolean()) {
-            metadataBuilder.putCustom(WatcherMetadata.TYPE, new WatcherMetadata(false));
+            metadataBuilder.putSection(WatcherMetadata.TYPE, new WatcherMetadata(false));
         }
         Metadata metadata = metadataBuilder.build();
         IndexRoutingTable indexRoutingTable = indexRoutingTableBuilder.build();
@@ -170,7 +170,7 @@ public void testManualStartStop() {
         ClusterState stoppedClusterState = ClusterState.builder(new ClusterName("my-cluster"))
             .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1")))
             .routingTable(RoutingTable.builder().add(indexRoutingTable).build())
-            .metadata(Metadata.builder(metadata).putCustom(WatcherMetadata.TYPE, new WatcherMetadata(true)).build())
+            .metadata(Metadata.builder(metadata).putSection(WatcherMetadata.TYPE, new WatcherMetadata(true)).build())
             .build();
 
         lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", stoppedClusterState, clusterState));
@@ -212,7 +212,7 @@ public void testExceptionOnStart() {
             .put(indexMetadataBuilder)
             .put(IndexTemplateMetadata.builder(HISTORY_TEMPLATE_NAME).patterns(randomIndexPatterns()));
         if (randomBoolean()) {
-            metadataBuilder.putCustom(WatcherMetadata.TYPE, new WatcherMetadata(false));
+            metadataBuilder.putSection(WatcherMetadata.TYPE, new WatcherMetadata(false));
         }
         Metadata metadata = metadataBuilder.build();
         IndexRoutingTable indexRoutingTable = indexRoutingTableBuilder.build();
@@ -226,7 +226,7 @@ public void testExceptionOnStart() {
         ClusterState stoppedClusterState = ClusterState.builder(new ClusterName("my-cluster"))
             .nodes(new DiscoveryNodes.Builder().masterNodeId("node_1").localNodeId("node_1").add(newNode("node_1")))
             .routingTable(RoutingTable.builder().add(indexRoutingTable).build())
-            .metadata(Metadata.builder(metadata).putCustom(WatcherMetadata.TYPE, new WatcherMetadata(true)).build())
+            .metadata(Metadata.builder(metadata).putSection(WatcherMetadata.TYPE, new WatcherMetadata(true)).build())
             .build();
 
         lifeCycleService.clusterChanged(new ClusterChangedEvent("foo", stoppedClusterState, clusterState));
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetadataSerializationTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetadataSerializationTests.java
index 498d198bb33e2..de4e618e3d8ba 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetadataSerializationTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/WatcherMetadataSerializationTests.java
@@ -52,11 +52,11 @@ public void testWatcherMetadataParsingDoesNotSwallowOtherMetadata() throws Excep
         RepositoriesMetadata repositoriesMetadata = new RepositoriesMetadata(Collections.singletonList(repositoryMetadata));
         final Metadata.Builder metadataBuilder = Metadata.builder();
         if (randomBoolean()) { // random order of insertion
-            metadataBuilder.putCustom(watcherMetadata.getWriteableName(), watcherMetadata);
-            metadataBuilder.putCustom(repositoriesMetadata.getWriteableName(), repositoriesMetadata);
+            metadataBuilder.putSection(watcherMetadata.getWriteableName(), watcherMetadata);
+            metadataBuilder.putSection(repositoriesMetadata.getWriteableName(), repositoriesMetadata);
         } else {
-            metadataBuilder.putCustom(repositoriesMetadata.getWriteableName(), repositoriesMetadata);
-            metadataBuilder.putCustom(watcherMetadata.getWriteableName(), watcherMetadata);
+            metadataBuilder.putSection(repositoriesMetadata.getWriteableName(), repositoriesMetadata);
+            metadataBuilder.putSection(watcherMetadata.getWriteableName(), watcherMetadata);
         }
         // serialize metadata
         XContentBuilder builder = XContentFactory.jsonBuilder();
@@ -69,8 +69,8 @@ public void testWatcherMetadataParsingDoesNotSwallowOtherMetadata() throws Excep
         // deserialize metadata again
         Metadata metadata = Metadata.Builder.fromXContent(createParser(builder));
         // check that custom metadata still present
-        assertThat(metadata.custom(watcherMetadata.getWriteableName()), notNullValue());
-        assertThat(metadata.custom(repositoriesMetadata.getWriteableName()), notNullValue());
+        assertThat(metadata.section(watcherMetadata.getWriteableName()), notNullValue());
+        assertThat(metadata.section(repositoriesMetadata.getWriteableName()), notNullValue());
     }
 
     private static WatcherMetadata getWatcherMetadataFromXContent(XContentParser parser) throws Exception {
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java
index fa0dc89fd5106..fc842a3bb354c 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/condition/ScriptConditionTests.java
@@ -88,7 +88,7 @@ public void init() throws IOException {
         scriptService = WatcherMockScriptPlugin.newMockScriptService(scripts);
 
         ClusterState.Builder clusterState = new ClusterState.Builder(new ClusterName("_name"));
-        clusterState.metadata(Metadata.builder().putCustom(ScriptMetadata.TYPE, new ScriptMetadata.Builder(null).build()));
+        clusterState.metadata(Metadata.builder().putSection(ScriptMetadata.TYPE, new ScriptMetadata.Builder(null).build()));
         ClusterState cs = clusterState.build();
         scriptService.applyClusterState(new ClusterChangedEvent("_source", cs, cs));
     }
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java
index 6d740bc5c5e4f..1dfc6a1192dc5 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/support/WatcherIndexTemplateRegistryTests.java
@@ -336,7 +336,7 @@ private ClusterState createClusterState(
                 Metadata.builder()
                     .templates(indexTemplates)
                     .transientSettings(nodeSettings)
-                    .putCustom(IndexLifecycleMetadata.TYPE, ilmMeta)
+                    .putSection(IndexLifecycleMetadata.TYPE, ilmMeta)
                     .build()
             )
             .blocks(new ClusterBlocks.Builder().build())
diff --git a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java
index a04eb3e7d5091..2420f5a57dce6 100644
--- a/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java
+++ b/x-pack/plugin/watcher/src/test/java/org/elasticsearch/xpack/watcher/watch/WatchStoreUtilsTests.java
@@ -14,6 +14,7 @@
 import org.elasticsearch.cluster.metadata.DataStreamTestHelper;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.Metadata;
+import org.elasticsearch.cluster.metadata.MetadataSection;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;
@@ -31,7 +32,7 @@ public class WatchStoreUtilsTests extends ESTestCase {
     public void testGetConcreteIndexForDataStream() {
         String dataStreamName = randomAlphaOfLength(20);
         Metadata.Builder metadataBuilder = Metadata.builder();
-        Map<String, Metadata.Custom> customsBuilder = new HashMap<>();
+        Map<String, MetadataSection> customsBuilder = new HashMap<>();
         Map<String, DataStream> dataStreams = new HashMap<>();
         Map<String, IndexMetadata> indexMetadataMapBuilder = new HashMap<>();
         List<String> indexNames = new ArrayList<>();
@@ -54,7 +55,7 @@ public void testGetConcreteIndexForDataStream() {
             dataStreamAliases
         );
         customsBuilder.put(DataStreamMetadata.TYPE, dataStreamMetadata);
-        metadataBuilder.customs(customsBuilder);
+        metadataBuilder.sections(customsBuilder);
         IndexMetadata concreteIndex = WatchStoreUtils.getConcreteIndex(dataStreamName, metadataBuilder.build());
         assertNotNull(concreteIndex);
         assertEquals(indexNames.get(indexNames.size() - 1), concreteIndex.getIndex().getName());