diff --git a/docs/resources/elasticsearch_ml_anomaly_detection_job.md b/docs/resources/elasticsearch_ml_anomaly_detection_job.md
new file mode 100644
index 000000000..b79ed6c00
--- /dev/null
+++ b/docs/resources/elasticsearch_ml_anomaly_detection_job.md
@@ -0,0 +1,219 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "elasticstack_elasticsearch_ml_anomaly_detection_job Resource - terraform-provider-elasticstack"
+subcategory: "Ml"
+description: |-
+ Creates and manages Machine Learning anomaly detection jobs. See the ML Job API documentation https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html for more details.
+---
+
+# elasticstack_elasticsearch_ml_anomaly_detection_job (Resource)
+
+Creates and manages Machine Learning anomaly detection jobs. See the [ML Job API documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html) for more details.
+
+## Example Usage
+
+```terraform
+terraform {
+ required_providers {
+ elasticstack = {
+ source = "elastic/elasticstack"
+ version = "~> 0.11"
+ }
+ }
+}
+
+provider "elasticstack" {
+ elasticsearch {}
+}
+
+# Basic anomaly detection job
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "example" {
+ job_id = "example-anomaly-detector"
+ description = "Example anomaly detection job for monitoring web traffic"
+ groups = ["web", "monitoring"]
+
+ analysis_config = {
+ bucket_span = "15m"
+ detectors = [
+ {
+ function = "count"
+ detector_description = "Count anomalies in web traffic"
+ },
+ {
+ function = "mean"
+ field_name = "response_time"
+ detector_description = "Mean response time anomalies"
+ }
+ ]
+ influencers = ["client_ip", "status_code"]
+ }
+
+ data_description = {
+ time_field = "@timestamp"
+ time_format = "epoch_ms"
+ }
+
+ analysis_limits = {
+ model_memory_limit = "100mb"
+ }
+
+ model_plot_config = {
+ enabled = true
+ }
+
+ model_snapshot_retention_days = 30
+ results_retention_days = 90
+}
+```
+
+
+## Schema
+
+### Required
+
+- `analysis_config` (Attributes) Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational. (see [below for nested schema](#nestedatt--analysis_config))
+- `data_description` (Attributes) Defines the format of the input data when you send data to the job by using the post data API. (see [below for nested schema](#nestedatt--data_description))
+- `job_id` (String) The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.
+
+### Optional
+
+- `allow_lazy_open` (Boolean) Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node.
+- `analysis_limits` (Attributes) Limits can be applied for the resources required to hold the mathematical models in memory. (see [below for nested schema](#nestedatt--analysis_limits))
+- `background_persist_interval` (String) Advanced configuration option. The time between each periodic persistence of the model.
+- `custom_settings` (String) Advanced configuration option. Contains custom meta data about the job. For example, it can contain custom URL information.
+- `daily_model_snapshot_retention_after_days` (Number) Advanced configuration option, which affects the automatic removal of old model snapshots for this job.
+- `description` (String) A description of the job.
+- `elasticsearch_connection` (Block List, Deprecated) Elasticsearch connection configuration block. (see [below for nested schema](#nestedblock--elasticsearch_connection))
+- `groups` (Set of String) A set of job groups. A job can belong to no groups or many.
+- `model_plot_config` (Attributes) This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection. (see [below for nested schema](#nestedatt--model_plot_config))
+- `model_snapshot_retention_days` (Number) Advanced configuration option, which affects the automatic removal of old model snapshots for this job.
+- `renormalization_window_days` (Number) Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen.
+- `results_index_name` (String) A text string that affects the name of the machine learning results index.
+- `results_retention_days` (Number) Advanced configuration option. The period of time (in days) that results are retained.
+
+### Read-Only
+
+- `create_time` (String) The time the job was created.
+- `id` (String) Internal identifier of the resource
+- `job_type` (String) Reserved for future use, currently set to anomaly_detector.
+- `job_version` (String) The version of Elasticsearch when the job was created.
+- `model_snapshot_id` (String) A numerical character string that uniquely identifies the model snapshot.
+
+
+### Nested Schema for `analysis_config`
+
+Required:
+
+- `bucket_span` (String) The size of the interval that the analysis is aggregated into, typically between 15m and 1h. If the anomaly detector expects data at near real-time frequency, set bucket_span to roughly 10 times the interval between ingested documents: if data arrives every second, use 10s; if it arrives every 5 minutes, use 50m. For sparse or batch data, use larger bucket_span values.
+- `detectors` (Attributes List) Detector configuration objects. Detectors identify the anomaly detection functions and the fields on which they operate. (see [below for nested schema](#nestedatt--analysis_config--detectors))
+
+Optional:
+
+- `categorization_field_name` (String) For categorization jobs only. The name of the field to categorize.
+- `categorization_filters` (List of String) For categorization jobs only. An array of regular expressions. A categorization message is matched against each regex in the order they are listed in the array.
+- `influencers` (List of String) A list of influencer field names. Typically these are the by, over, or partition fields used in the detector configuration.
+- `latency` (String) The size of the window in which to expect data that is out of time order. If you specify a non-zero value, it must be greater than or equal to one second.
+- `model_prune_window` (String) Advanced configuration option. Affects the pruning of model entities that have not been updated for the given time duration; the value must be a multiple of bucket_span.
+- `multivariate_by_fields` (Boolean) This functionality is reserved for internal use. It is not supported for use in customer environments and is not subject to the support SLA of official GA features.
+- `per_partition_categorization` (Attributes) Settings related to how categorization interacts with partition fields. (see [below for nested schema](#nestedatt--analysis_config--per_partition_categorization))
+- `summary_count_field_name` (String) If this property is specified, the data that is fed to the job is expected to be pre-summarized.
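+
+For example, when the input is pre-aggregated, `summary_count_field_name` identifies the field holding the count of raw events that each summary document represents. A minimal sketch, assuming the summarized input carries a `doc_count` field (job and field names are illustrative):
+
+```terraform
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "summarized" {
+  job_id = "summarized-example"
+
+  analysis_config = {
+    bucket_span              = "1h"
+    summary_count_field_name = "doc_count"
+    detectors = [
+      {
+        function             = "count"
+        detector_description = "Count over pre-summarized data"
+      }
+    ]
+  }
+
+  data_description = {
+    time_field = "@timestamp"
+  }
+}
+```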
+
+
+### Nested Schema for `analysis_config.detectors`
+
+Required:
+
+- `function` (String) The analysis function that is used. For example, count, rare, mean, min, max, sum.
+
+Optional:
+
+- `by_field_name` (String) The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split.
+- `custom_rules` (Attributes List) Custom rules enable you to customize the way detectors operate. (see [below for nested schema](#nestedatt--analysis_config--detectors--custom_rules))
+- `detector_description` (String) A description of the detector.
+- `exclude_frequent` (String) Contains one of the following values: all, none, by, or over.
+- `field_name` (String) The field that the detector function analyzes. Some functions require a field. Functions that don't require a field are count, rare, and freq_rare.
+- `over_field_name` (String) The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits.
+- `partition_field_name` (String) The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field.
+- `use_null` (Boolean) Defines whether a new series is used as the null series when there is no value for the by or partition fields.
+
+
+### Nested Schema for `analysis_config.detectors.custom_rules`
+
+Optional:
+
+- `actions` (List of String) The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined.
+- `conditions` (Attributes List) An array of numeric conditions when the rule applies. (see [below for nested schema](#nestedatt--analysis_config--detectors--custom_rules--conditions))
+
+
+### Nested Schema for `analysis_config.detectors.custom_rules.conditions`
+
+Required:
+
+- `applies_to` (String) Specifies the result property to which the condition applies.
+- `operator` (String) Specifies the condition operator.
+- `value` (Number) The value that is compared against the applies_to field using the operator.
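+
+A minimal sketch of a custom rule that skips results when the actual value is below a threshold (job name, field names, and the threshold are illustrative):
+
+```terraform
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "with_rules" {
+  job_id = "custom-rules-example"
+
+  analysis_config = {
+    bucket_span = "15m"
+    detectors = [
+      {
+        function   = "mean"
+        field_name = "response_time"
+        custom_rules = [
+          {
+            actions = ["skip_result"]
+            conditions = [
+              {
+                applies_to = "actual"
+                operator   = "lt"
+                value      = 10
+              }
+            ]
+          }
+        ]
+      }
+    ]
+  }
+
+  data_description = {
+    time_field = "@timestamp"
+  }
+}
+```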
+
+
+
+
+
+### Nested Schema for `analysis_config.per_partition_categorization`
+
+Optional:
+
+- `enabled` (Boolean) To enable this setting, you must also set the partition_field_name property to the same value in every detector that uses the keyword mlcategory. Otherwise, job creation fails.
+- `stop_on_warn` (Boolean) This setting can be set to true only if per-partition categorization is enabled.
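+
+A minimal sketch of per-partition categorization; note the detector that uses `mlcategory` sets `partition_field_name` (job and field names are illustrative):
+
+```terraform
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "per_partition" {
+  job_id = "per-partition-example"
+
+  analysis_config = {
+    bucket_span               = "1h"
+    categorization_field_name = "message"
+    detectors = [
+      {
+        function             = "count"
+        by_field_name        = "mlcategory"
+        partition_field_name = "datacenter"
+        detector_description = "Category counts per datacenter"
+      }
+    ]
+    per_partition_categorization = {
+      enabled      = true
+      stop_on_warn = true
+    }
+  }
+
+  data_description = {
+    time_field = "@timestamp"
+  }
+}
+```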
+
+
+
+
+### Nested Schema for `data_description`
+
+Optional:
+
+- `field_delimiter` (String) The character used to delimit fields in the data. Only applicable when format is delimited.
+- `format` (String) Only JSON format is supported at this time.
+- `quote_character` (String) The character used to quote fields in the data. Only applicable when format is delimited.
+- `time_field` (String) The name of the field that contains the timestamp.
+- `time_format` (String) The time format, which can be epoch, epoch_ms, or a custom pattern.
+
+
+
+### Nested Schema for `analysis_limits`
+
+Optional:
+
+- `categorization_examples_limit` (Number) The maximum number of examples stored per category in memory and in the results data store.
+- `model_memory_limit` (String) The approximate maximum amount of memory resources that are required for analytical processing.
+
+
+
+### Nested Schema for `elasticsearch_connection`
+
+Optional:
+
+- `api_key` (String, Sensitive) API Key to use for authentication to Elasticsearch
+- `bearer_token` (String, Sensitive) Bearer Token to use for authentication to Elasticsearch
+- `ca_data` (String) PEM-encoded custom Certificate Authority certificate
+- `ca_file` (String) Path to a custom Certificate Authority certificate
+- `cert_data` (String) PEM encoded certificate for client auth
+- `cert_file` (String) Path to a file containing the PEM encoded certificate for client auth
+- `endpoints` (List of String, Sensitive) A list of endpoints the Terraform provider will point to; each must include the http(s) scheme and port number.
+- `es_client_authentication` (String, Sensitive) ES Client Authentication field to be used with the JWT token
+- `headers` (Map of String, Sensitive) A map of headers to be sent with each request to Elasticsearch.
+- `insecure` (Boolean) Disable TLS certificate validation
+- `key_data` (String, Sensitive) PEM encoded private key for client auth
+- `key_file` (String) Path to a file containing the PEM encoded private key for client auth
+- `password` (String, Sensitive) Password to use for API authentication to Elasticsearch.
+- `username` (String) Username to use for API authentication to Elasticsearch.
+
+
+
+### Nested Schema for `model_plot_config`
+
+Optional:
+
+- `annotations_enabled` (Boolean) If true, enables calculation and storage of the model change annotations for each entity that is being analyzed.
+- `enabled` (Boolean) If true, enables calculation and storage of the model bounds for each entity that is being analyzed.
+- `terms` (String) Limits data collection to this comma separated list of partition or by field values. If terms is not specified or is an empty string, no filtering is applied.
diff --git a/examples/resources/elasticstack_elasticsearch_ml_anomaly_detection_job/resource.tf b/examples/resources/elasticstack_elasticsearch_ml_anomaly_detection_job/resource.tf
new file mode 100644
index 000000000..1656e3aa3
--- /dev/null
+++ b/examples/resources/elasticstack_elasticsearch_ml_anomaly_detection_job/resource.tf
@@ -0,0 +1,51 @@
+terraform {
+ required_providers {
+ elasticstack = {
+ source = "elastic/elasticstack"
+ version = "~> 0.11"
+ }
+ }
+}
+
+provider "elasticstack" {
+ elasticsearch {}
+}
+
+# Basic anomaly detection job
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "example" {
+ job_id = "example-anomaly-detector"
+ description = "Example anomaly detection job for monitoring web traffic"
+ groups = ["web", "monitoring"]
+
+ analysis_config = {
+ bucket_span = "15m"
+ detectors = [
+ {
+ function = "count"
+ detector_description = "Count anomalies in web traffic"
+ },
+ {
+ function = "mean"
+ field_name = "response_time"
+ detector_description = "Mean response time anomalies"
+ }
+ ]
+ influencers = ["client_ip", "status_code"]
+ }
+
+ data_description = {
+ time_field = "@timestamp"
+ time_format = "epoch_ms"
+ }
+
+ analysis_limits = {
+ model_memory_limit = "100mb"
+ }
+
+ model_plot_config = {
+ enabled = true
+ }
+
+ model_snapshot_retention_days = 30
+ results_retention_days = 90
+}
diff --git a/internal/elasticsearch/ml/anomaly_detection_job/README.md b/internal/elasticsearch/ml/anomaly_detection_job/README.md
new file mode 100644
index 000000000..93ae437e6
--- /dev/null
+++ b/internal/elasticsearch/ml/anomaly_detection_job/README.md
@@ -0,0 +1,228 @@
+# ML Anomaly Detection Job Resource
+
+This resource creates and manages Machine Learning anomaly detection jobs in Elasticsearch. Anomaly detection identifies unusual behavior in time series data by modeling its historical patterns.
+
+## Key Features
+
+- **Complete API Coverage**: Supports all ML anomaly detection job API options including:
+ - Analysis configuration with detectors, influencers, and bucket span
+ - Data description for time-based data
+ - Analysis limits for memory management
+ - Model plot configuration for detailed views
+ - Datafeed configuration for data ingestion
+ - Custom settings and retention policies
+
+- **Job Lifecycle Management**:
+ - Create new anomaly detection jobs
+ - Update job configurations (limited fields)
+ - Delete jobs (automatically closes jobs before deletion)
+ - Import existing jobs
+
+- **Framework Migration**: Built using Terraform Plugin Framework for better performance and type safety
+
+## Supported Operations
+
+### Create Job
+- PUT `/_ml/anomaly_detectors/{job_id}`
+- Supports all job configuration options
+- Includes optional datafeed configuration
+
+### Read Job
+- GET `/_ml/anomaly_detectors/{job_id}`
+- Retrieves current job configuration and status
+
+### Update Job
+- POST `/_ml/anomaly_detectors/{job_id}/_update`
+- Updates modifiable job properties (see the sketch after this list):
+ - description
+ - groups
+ - model_plot_config
+ - analysis_limits.model_memory_limit
+ - renormalization_window_days
+ - results_retention_days
+ - custom_settings
+ - background_persist_interval
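+
+For example, a configuration change that touches only these properties should translate to an in-place `_update` call rather than job recreation. A minimal sketch (job ID and values are illustrative):
+
+```hcl
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "update_example" {
+  job_id      = "update-example"
+  description = "Updated description" # updatable in place
+
+  analysis_config = {
+    bucket_span = "15m"
+    detectors = [
+      {
+        function             = "count"
+        detector_description = "Count anomalies"
+      }
+    ]
+  }
+
+  data_description = {
+    time_field  = "@timestamp"
+    time_format = "epoch_ms"
+  }
+
+  # Also updatable without recreating the job:
+  analysis_limits = {
+    model_memory_limit = "20mb"
+  }
+  results_retention_days = 30
+}
+```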
+
+### Delete Job
+- POST `/_ml/anomaly_detectors/{job_id}/_close` (if needed)
+- DELETE `/_ml/anomaly_detectors/{job_id}`
+
+## Configuration Examples
+
+### Basic Count Detector
+```hcl
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "basic" {
+ job_id = "basic-count-job"
+ description = "Basic count anomaly detection"
+
+ analysis_config = {
+ bucket_span = "15m"
+ detectors = [
+ {
+ function = "count"
+ detector_description = "Count anomalies"
+ }
+ ]
+ }
+
+ data_description = {
+ time_field = "@timestamp"
+ time_format = "epoch_ms"
+ }
+
+ analysis_limits = {
+ model_memory_limit = "10mb"
+ }
+}
+```
+
+### Advanced Multi-Detector Job
+```hcl
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "advanced" {
+ job_id = "advanced-web-analytics"
+ description = "Advanced web analytics anomaly detection"
+ groups = ["web", "analytics"]
+
+ analysis_config = {
+ bucket_span = "15m"
+ detectors = [
+ {
+ function = "count"
+ by_field_name = "client_ip"
+ detector_description = "High request count per IP"
+ },
+ {
+ function = "mean"
+ field_name = "response_time"
+ over_field_name = "url.path"
+ detector_description = "Response time anomalies by URL"
+ },
+ {
+ function = "distinct_count"
+ field_name = "user_id"
+ detector_description = "Unique user count anomalies"
+ }
+ ]
+ influencers = ["client_ip", "url.path", "status_code"]
+ }
+
+ data_description = {
+ time_field = "@timestamp"
+ time_format = "epoch_ms"
+ }
+
+ analysis_limits = {
+ model_memory_limit = "100mb"
+ categorization_examples_limit = 10
+ }
+
+ model_plot_config = {
+ enabled = true
+ annotations_enabled = true
+ }
+
+ datafeed_config = {
+ datafeed_id = "datafeed-advanced-web-analytics"
+ indices = ["web-logs-*"]
+ query = jsonencode({
+ bool = {
+ filter = [
+ {
+ range = {
+ "@timestamp" = {
+ gte = "now-7d"
+ }
+ }
+ }
+ ]
+ }
+ })
+ frequency = "30s"
+ query_delay = "60s"
+ scroll_size = 1000
+ }
+
+ model_snapshot_retention_days = 30
+ results_retention_days = 90
+ daily_model_snapshot_retention_after_days = 7
+}
+```
+
+### Categorization Job
+```hcl
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "categorization" {
+ job_id = "log-categorization"
+ description = "Log message categorization job"
+
+ analysis_config = {
+ bucket_span = "1h"
+ categorization_field_name = "message"
+ categorization_filters = [
+ "\\b\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\b", # IP addresses
+ "\\b[A-Fa-f0-9]{8,}\\b" # Hex values
+ ]
+ detectors = [
+      {
+        function             = "count"
+        by_field_name        = "mlcategory"
+        # Required when per-partition categorization is enabled: every detector
+        # that uses mlcategory must set the same partition_field_name (the field
+        # name here is illustrative).
+        partition_field_name = "event.dataset"
+        detector_description = "Log category count anomalies"
+      }
+ ]
+ per_partition_categorization = {
+ enabled = true
+ stop_on_warn = true
+ }
+ }
+
+ data_description = {
+ time_field = "@timestamp"
+ time_format = "epoch_ms"
+ }
+
+ analysis_limits = {
+ model_memory_limit = "200mb"
+ categorization_examples_limit = 20
+ }
+}
+```
+
+## Field Validation
+
+The resource includes comprehensive validation:
+
+- **job_id**: Must contain only lowercase alphanumeric characters, hyphens, and underscores, and must start and end with an alphanumeric character
+- **bucket_span**: Must be a valid time interval (e.g., "15m", "1h")
+- **detector.function**: Must be one of the supported ML functions
+- **model_memory_limit**: Must be a valid memory size format (e.g., "100mb")
+- **time_format**: Supports epoch, epoch_ms, or custom patterns
+
+## Import Support
+
+Existing ML anomaly detection jobs can be imported:
+
+```bash
+terraform import elasticstack_elasticsearch_ml_anomaly_detection_job.example existing-job-id
+```
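+
+With Terraform 1.5 or later, a declarative `import` block can be used instead (the job ID is illustrative):
+
+```hcl
+import {
+  to = elasticstack_elasticsearch_ml_anomaly_detection_job.example
+  id = "existing-job-id"
+}
+```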
+
+## Error Handling
+
+The resource handles various error scenarios:
+
+- **Job not found**: Gracefully removes resource from state
+- **Insufficient ML capacity**: Provides clear error messages
+- **Configuration conflicts**: Validates detector configurations
+- **Memory limits**: Warns about memory usage patterns
+
+## Best Practices
+
+1. **Memory Sizing**: Start with conservative memory limits and increase as needed
+2. **Bucket Span**: Choose appropriate bucket spans based on data frequency
+3. **Detectors**: Use specific field names for better anomaly detection
+4. **Influencers**: Include relevant fields that might influence anomalies
+5. **Datafeeds**: Use appropriate query delays for real-time data
+
+## Limitations
+
+- Some job properties cannot be updated after creation (analysis_config structure)
+- Jobs must be closed before deletion (handled automatically)
+- Datafeed creation is included but separate datafeed management is recommended for complex scenarios
diff --git a/internal/elasticsearch/ml/anomaly_detection_job/acc_test.go b/internal/elasticsearch/ml/anomaly_detection_job/acc_test.go
new file mode 100644
index 000000000..81595bdb3
--- /dev/null
+++ b/internal/elasticsearch/ml/anomaly_detection_job/acc_test.go
@@ -0,0 +1,214 @@
+package anomaly_detection_job_test
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/acctest"
+ sdkacctest "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
+ "github.com/hashicorp/terraform-plugin-testing/helper/resource"
+)
+
+func TestAccResourceAnomalyDetectionJob(t *testing.T) {
+ jobID := fmt.Sprintf("test-anomaly-detector-%s", sdkacctest.RandStringFromCharSet(10, sdkacctest.CharSetAlphaNum))
+
+ resource.Test(t, resource.TestCase{
+ PreCheck: func() { acctest.PreCheck(t) },
+ ProtoV6ProviderFactories: acctest.Providers,
+ Steps: []resource.TestStep{
+ {
+ Config: testAccResourceAnomalyDetectionJobBasic(jobID),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "job_id", jobID),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "description", "Test anomaly detection job"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "analysis_config.bucket_span", "15m"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "analysis_config.detectors.0.function", "count"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "data_description.time_field", "@timestamp"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "data_description.time_format", "epoch_ms"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "create_time"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "job_type", "anomaly_detector"),
+ ),
+ },
+ {
+ Config: testAccResourceAnomalyDetectionJobComprehensive(jobID),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "job_id", jobID),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "description", "Comprehensive test anomaly detection job"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "groups.#", "2"),
+ // Analysis config checks
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "analysis_config.bucket_span", "10m"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "analysis_config.detectors.#", "2"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "analysis_config.detectors.0.function", "count"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "analysis_config.detectors.1.function", "mean"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "analysis_config.detectors.1.field_name", "response_time"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "analysis_config.influencers.#", "1"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "analysis_config.influencers.0", "status_code"),
+ // Analysis limits checks
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "analysis_limits.model_memory_limit", "100mb"),
+ // Data description checks
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "data_description.time_field", "@timestamp"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "data_description.time_format", "epoch_ms"),
+ // Model plot config checks
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "model_plot_config.enabled", "true"),
+ // Other settings checks
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "allow_lazy_open", "true"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "background_persist_interval", "1h"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "custom_settings", "{\"custom_key\": \"custom_value\"}"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "daily_model_snapshot_retention_after_days", "3"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "model_snapshot_retention_days", "7"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "renormalization_window_days", "14"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "results_retention_days", "30"),
+ // Computed fields
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "create_time"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "job_type", "anomaly_detector"),
+ resource.TestCheckResourceAttrSet("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "job_version"),
+ ),
+ },
+ {
+ Config: testAccResourceAnomalyDetectionJobUpdated(jobID),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "job_id", jobID),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "description", "Updated test anomaly detection job"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "groups.#", "1"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "groups.0", "test-group"),
+ // Verify that updatable fields were actually updated
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "analysis_limits.model_memory_limit", "200mb"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "model_plot_config.enabled", "false"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "allow_lazy_open", "false"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "background_persist_interval", "2h"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "custom_settings", "{\"updated_key\": \"updated_value\"}"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "daily_model_snapshot_retention_after_days", "5"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "model_snapshot_retention_days", "14"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "renormalization_window_days", "30"),
+ resource.TestCheckResourceAttr("elasticstack_elasticsearch_ml_anomaly_detection_job.test", "results_retention_days", "60"),
+ ),
+ },
+ },
+ })
+}
+
+func testAccResourceAnomalyDetectionJobBasic(jobID string) string {
+ return fmt.Sprintf(`
+provider "elasticstack" {
+ elasticsearch {}
+}
+
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "test" {
+ job_id = "%s"
+ description = "Test anomaly detection job"
+
+ analysis_config = {
+ bucket_span = "15m"
+ detectors = [
+ {
+ function = "count"
+ detector_description = "Count detector"
+ }
+ ]
+ }
+
+ data_description = {
+ time_field = "@timestamp"
+ time_format = "epoch_ms"
+ }
+}
+`, jobID)
+}
+
+func testAccResourceAnomalyDetectionJobComprehensive(jobID string) string {
+ return fmt.Sprintf(`
+provider "elasticstack" {
+ elasticsearch {}
+}
+
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "test" {
+ job_id = "%s"
+ description = "Comprehensive test anomaly detection job"
+ groups = ["test-group", "ml-group"]
+
+ analysis_config = {
+ bucket_span = "10m"
+ detectors = [
+ {
+ function = "count"
+ detector_description = "Count detector"
+ },
+ {
+ function = "mean"
+ field_name = "response_time"
+ detector_description = "Mean response time detector"
+ }
+ ]
+ influencers = ["status_code"]
+ }
+
+ analysis_limits = {
+ model_memory_limit = "100mb"
+ }
+
+ data_description = {
+ time_field = "@timestamp"
+ time_format = "epoch_ms"
+ }
+
+ model_plot_config = {
+ enabled = true
+ }
+
+ allow_lazy_open = true
+ background_persist_interval = "1h"
+ custom_settings = "{\"custom_key\": \"custom_value\"}"
+ daily_model_snapshot_retention_after_days = 3
+ model_snapshot_retention_days = 7
+ renormalization_window_days = 14
+ results_retention_days = 30
+}
+`, jobID)
+}
+
+func testAccResourceAnomalyDetectionJobUpdated(jobID string) string {
+ return fmt.Sprintf(`
+provider "elasticstack" {
+ elasticsearch {}
+}
+
+resource "elasticstack_elasticsearch_ml_anomaly_detection_job" "test" {
+ job_id = "%s"
+ description = "Updated test anomaly detection job"
+
+ analysis_config = {
+ bucket_span = "15m"
+ detectors = [
+ {
+ function = "count"
+ detector_description = "Count detector"
+ }
+ ]
+ }
+
+ data_description = {
+ time_field = "@timestamp"
+ time_format = "epoch_ms"
+ }
+
+ groups = ["test-group"]
+
+ # Test updating some of the other updatable fields
+ analysis_limits = {
+ model_memory_limit = "200mb"
+ }
+
+ model_plot_config = {
+ enabled = false
+ }
+
+ allow_lazy_open = false
+ background_persist_interval = "2h"
+ custom_settings = "{\"updated_key\": \"updated_value\"}"
+ daily_model_snapshot_retention_after_days = 5
+ model_snapshot_retention_days = 14
+ renormalization_window_days = 30
+ results_retention_days = 60
+}
+`, jobID)
+}
diff --git a/internal/elasticsearch/ml/anomaly_detection_job/create.go b/internal/elasticsearch/ml/anomaly_detection_job/create.go
new file mode 100644
index 000000000..1ff08d8c0
--- /dev/null
+++ b/internal/elasticsearch/ml/anomaly_detection_job/create.go
@@ -0,0 +1,85 @@
+package anomaly_detection_job
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/diagutil"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+func (r *anomalyDetectionJobResource) create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+ if !r.resourceReady(&resp.Diagnostics) {
+ return
+ }
+
+ var plan AnomalyDetectionJobTFModel
+ diags := req.Plan.Get(ctx, &plan)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ jobID := plan.JobID.ValueString()
+
+ // Convert TF model to API model
+ apiModel, diags := plan.toAPIModel(ctx)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ tflog.Debug(ctx, fmt.Sprintf("Creating ML anomaly detection job: %s", jobID))
+
+ esClient, err := r.client.GetESClient()
+ if err != nil {
+ resp.Diagnostics.AddError("Failed to get Elasticsearch client", err.Error())
+ return
+ }
+
+ // Marshal the API model to JSON
+ body, err := json.Marshal(apiModel)
+ if err != nil {
+ resp.Diagnostics.AddError("Failed to marshal job configuration", err.Error())
+ return
+ }
+
+ // Create the ML job
+ res, err := esClient.ML.PutJob(jobID, bytes.NewReader(body), esClient.ML.PutJob.WithContext(ctx))
+ if err != nil {
+ resp.Diagnostics.AddError("Failed to create ML anomaly detection job", err.Error())
+ return
+ }
+ defer res.Body.Close()
+
+ if diags := diagutil.CheckErrorFromFW(res, fmt.Sprintf("Unable to create ML anomaly detection job: %s", jobID)); diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ // Read the created job to get the full state.
+ compID, sdkDiags := r.client.ID(ctx, jobID)
+ resp.Diagnostics.Append(diagutil.FrameworkDiagsFromSDK(sdkDiags)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ plan.ID = types.StringValue(compID.String())
+ found, diags := r.read(ctx, &plan)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ if !found {
+ resp.Diagnostics.AddError("Failed to read created job", fmt.Sprintf("Job with ID %s not found after creation", jobID))
+ return
+ }
+
+ resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
+
+ tflog.Debug(ctx, fmt.Sprintf("Successfully created ML anomaly detection job: %s", jobID))
+}
diff --git a/internal/elasticsearch/ml/anomaly_detection_job/delete.go b/internal/elasticsearch/ml/anomaly_detection_job/delete.go
new file mode 100644
index 000000000..58ad2c8d2
--- /dev/null
+++ b/internal/elasticsearch/ml/anomaly_detection_job/delete.go
@@ -0,0 +1,62 @@
+package anomaly_detection_job
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/diagutil"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+func (r *anomalyDetectionJobResource) delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+ if !r.resourceReady(&resp.Diagnostics) {
+ return
+ }
+
+ var jobIDValue basetypes.StringValue
+ diags := req.State.GetAttribute(ctx, path.Root("job_id"), &jobIDValue)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ jobID := jobIDValue.ValueString()
+
+ tflog.Debug(ctx, fmt.Sprintf("Deleting ML anomaly detection job: %s", jobID))
+
+ esClient, err := r.client.GetESClient()
+ if err != nil {
+ resp.Diagnostics.AddError("Failed to get Elasticsearch client", err.Error())
+ return
+ }
+
+ // First, close the job if it's open
+ closeRes, err := esClient.ML.CloseJob(jobID, esClient.ML.CloseJob.WithContext(ctx))
+ if err != nil {
+ tflog.Warn(ctx, fmt.Sprintf("Failed to close ML job %s before deletion: %s", jobID, err.Error()))
+ // Continue with deletion even if close fails, as the job might already be closed
+ } else {
+ defer closeRes.Body.Close()
+ if closeRes.StatusCode != 200 && closeRes.StatusCode != 409 { // 409 means already closed
+ tflog.Warn(ctx, fmt.Sprintf("Failed to close ML job %s: status %d", jobID, closeRes.StatusCode))
+ }
+ }
+
+ // Delete the ML job
+ res, err := esClient.ML.DeleteJob(jobID, esClient.ML.DeleteJob.WithContext(ctx))
+ if err != nil {
+ resp.Diagnostics.AddError("Failed to delete ML anomaly detection job", err.Error())
+ return
+ }
+ defer res.Body.Close()
+
+ if diags := diagutil.CheckErrorFromFW(res, fmt.Sprintf("Unable to delete ML anomaly detection job: %s", jobID)); diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ tflog.Debug(ctx, fmt.Sprintf("Successfully deleted ML anomaly detection job: %s", jobID))
+}
diff --git a/internal/elasticsearch/ml/anomaly_detection_job/models_api.go b/internal/elasticsearch/ml/anomaly_detection_job/models_api.go
new file mode 100644
index 000000000..d34a1ac17
--- /dev/null
+++ b/internal/elasticsearch/ml/anomaly_detection_job/models_api.go
@@ -0,0 +1,130 @@
+package anomaly_detection_job
+
+// AnomalyDetectionJobAPIModel represents the API model for ML anomaly detection jobs
+type AnomalyDetectionJobAPIModel struct {
+ JobID string `json:"job_id"`
+ Description string `json:"description,omitempty"`
+ Groups []string `json:"groups,omitempty"`
+ AnalysisConfig AnalysisConfigAPIModel `json:"analysis_config"`
+ AnalysisLimits *AnalysisLimitsAPIModel `json:"analysis_limits,omitempty"`
+ DataDescription DataDescriptionAPIModel `json:"data_description"`
+ ModelPlotConfig *ModelPlotConfigAPIModel `json:"model_plot_config,omitempty"`
+ AllowLazyOpen *bool `json:"allow_lazy_open,omitempty"`
+ BackgroundPersistInterval string `json:"background_persist_interval,omitempty"`
+ CustomSettings map[string]interface{} `json:"custom_settings,omitempty"`
+ DailyModelSnapshotRetentionAfterDays *int64 `json:"daily_model_snapshot_retention_after_days,omitempty"`
+ ModelSnapshotRetentionDays *int64 `json:"model_snapshot_retention_days,omitempty"`
+ RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"`
+ ResultsIndexName string `json:"results_index_name,omitempty"`
+ ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"`
+
+ // Read-only fields
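+	// CreateTime is interface{} because the API typically returns create_time as
+	// epoch milliseconds (a JSON number); it is stringified when read into state.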
+ CreateTime interface{} `json:"create_time,omitempty"`
+ JobType string `json:"job_type,omitempty"`
+ JobVersion string `json:"job_version,omitempty"`
+ ModelSnapshotID string `json:"model_snapshot_id,omitempty"`
+}
+
+// AnalysisConfigAPIModel represents the analysis configuration in API format
+type AnalysisConfigAPIModel struct {
+ BucketSpan string `json:"bucket_span"`
+ CategorizationFieldName string `json:"categorization_field_name,omitempty"`
+ CategorizationFilters []string `json:"categorization_filters,omitempty"`
+ Detectors []DetectorAPIModel `json:"detectors"`
+ Influencers []string `json:"influencers,omitempty"`
+ Latency string `json:"latency,omitempty"`
+ ModelPruneWindow string `json:"model_prune_window,omitempty"`
+ MultivariateByFields *bool `json:"multivariate_by_fields,omitempty"`
+ PerPartitionCategorization *PerPartitionCategorizationAPIModel `json:"per_partition_categorization,omitempty"`
+ SummaryCountFieldName string `json:"summary_count_field_name,omitempty"`
+}
+
+// DetectorAPIModel represents a detector configuration in API format
+type DetectorAPIModel struct {
+ ByFieldName string `json:"by_field_name,omitempty"`
+ DetectorDescription string `json:"detector_description,omitempty"`
+ ExcludeFrequent string `json:"exclude_frequent,omitempty"`
+ FieldName string `json:"field_name,omitempty"`
+ Function string `json:"function"`
+ OverFieldName string `json:"over_field_name,omitempty"`
+ PartitionFieldName string `json:"partition_field_name,omitempty"`
+ UseNull *bool `json:"use_null,omitempty"`
+ CustomRules []CustomRuleAPIModel `json:"custom_rules,omitempty"`
+}
+
+// CustomRuleAPIModel represents a custom rule in API format
+type CustomRuleAPIModel struct {
+ Actions []interface{} `json:"actions,omitempty"`
+ Conditions []RuleConditionAPIModel `json:"conditions,omitempty"`
+}
+
+// RuleConditionAPIModel represents a rule condition in API format
+type RuleConditionAPIModel struct {
+ AppliesTo string `json:"applies_to"`
+ Operator string `json:"operator"`
+ Value float64 `json:"value"`
+}
+
+// AnalysisLimitsAPIModel represents analysis limits in API format
+type AnalysisLimitsAPIModel struct {
+ CategorizationExamplesLimit *int64 `json:"categorization_examples_limit,omitempty"`
+ ModelMemoryLimit string `json:"model_memory_limit,omitempty"`
+}
+
+// DataDescriptionAPIModel represents data description in API format
+type DataDescriptionAPIModel struct {
+ FieldDelimiter string `json:"field_delimiter,omitempty"`
+ Format string `json:"format,omitempty"`
+ QuoteCharacter string `json:"quote_character,omitempty"`
+ TimeField string `json:"time_field,omitempty"`
+ TimeFormat string `json:"time_format,omitempty"`
+}
+
+// ChunkingConfigAPIModel represents chunking configuration in API format
+type ChunkingConfigAPIModel struct {
+ Mode string `json:"mode"`
+ TimeSpan string `json:"time_span,omitempty"`
+}
+
+// DelayedDataCheckConfigAPIModel represents delayed data check configuration in API format
+type DelayedDataCheckConfigAPIModel struct {
+ CheckWindow string `json:"check_window,omitempty"`
+ Enabled bool `json:"enabled"`
+}
+
+// IndicesOptionsAPIModel represents indices options in API format
+type IndicesOptionsAPIModel struct {
+ ExpandWildcards []string `json:"expand_wildcards,omitempty"`
+ IgnoreUnavailable *bool `json:"ignore_unavailable,omitempty"`
+ AllowNoIndices *bool `json:"allow_no_indices,omitempty"`
+ IgnoreThrottled *bool `json:"ignore_throttled,omitempty"`
+}
+
+// ModelPlotConfigAPIModel represents model plot configuration in API format
+type ModelPlotConfigAPIModel struct {
+ AnnotationsEnabled *bool `json:"annotations_enabled,omitempty"`
+ Enabled bool `json:"enabled"`
+ Terms string `json:"terms,omitempty"`
+}
+
+// PerPartitionCategorizationAPIModel represents per-partition categorization in API format
+type PerPartitionCategorizationAPIModel struct {
+ Enabled bool `json:"enabled"`
+ StopOnWarn *bool `json:"stop_on_warn,omitempty"`
+}
+
+// AnomalyDetectionJobUpdateAPIModel represents the API model for updating ML anomaly detection jobs
+// This includes only the fields that can be updated after job creation
+type AnomalyDetectionJobUpdateAPIModel struct {
+ Description *string `json:"description,omitempty"`
+ Groups []string `json:"groups,omitempty"`
+ AnalysisLimits *AnalysisLimitsAPIModel `json:"analysis_limits,omitempty"`
+ ModelPlotConfig *ModelPlotConfigAPIModel `json:"model_plot_config,omitempty"`
+ AllowLazyOpen *bool `json:"allow_lazy_open,omitempty"`
+ BackgroundPersistInterval *string `json:"background_persist_interval,omitempty"`
+ CustomSettings map[string]interface{} `json:"custom_settings,omitempty"`
+ DailyModelSnapshotRetentionAfterDays *int64 `json:"daily_model_snapshot_retention_after_days,omitempty"`
+ ModelSnapshotRetentionDays *int64 `json:"model_snapshot_retention_days,omitempty"`
+ RenormalizationWindowDays *int64 `json:"renormalization_window_days,omitempty"`
+ ResultsRetentionDays *int64 `json:"results_retention_days,omitempty"`
+}
diff --git a/internal/elasticsearch/ml/anomaly_detection_job/models_tf.go b/internal/elasticsearch/ml/anomaly_detection_job/models_tf.go
new file mode 100644
index 000000000..6148e61c8
--- /dev/null
+++ b/internal/elasticsearch/ml/anomaly_detection_job/models_tf.go
@@ -0,0 +1,615 @@
+package anomaly_detection_job
+
+import (
+ "context"
+ "encoding/json"
+	"fmt"
+	"strconv"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/utils"
+ "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
+ fwdiags "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+)
+
+// AnomalyDetectionJobTFModel represents the Terraform resource model for ML anomaly detection jobs
+type AnomalyDetectionJobTFModel struct {
+ ID types.String `tfsdk:"id"`
+ ElasticsearchConnection types.List `tfsdk:"elasticsearch_connection"`
+ JobID types.String `tfsdk:"job_id"`
+ Description types.String `tfsdk:"description"`
+ Groups types.Set `tfsdk:"groups"`
+ AnalysisConfig types.Object `tfsdk:"analysis_config"`
+ AnalysisLimits types.Object `tfsdk:"analysis_limits"`
+ DataDescription types.Object `tfsdk:"data_description"`
+ ModelPlotConfig types.Object `tfsdk:"model_plot_config"`
+ AllowLazyOpen types.Bool `tfsdk:"allow_lazy_open"`
+ BackgroundPersistInterval types.String `tfsdk:"background_persist_interval"`
+ CustomSettings jsontypes.Normalized `tfsdk:"custom_settings"`
+ DailyModelSnapshotRetentionAfterDays types.Int64 `tfsdk:"daily_model_snapshot_retention_after_days"`
+ ModelSnapshotRetentionDays types.Int64 `tfsdk:"model_snapshot_retention_days"`
+ RenormalizationWindowDays types.Int64 `tfsdk:"renormalization_window_days"`
+ ResultsIndexName types.String `tfsdk:"results_index_name"`
+ ResultsRetentionDays types.Int64 `tfsdk:"results_retention_days"`
+
+ // Read-only computed fields
+ CreateTime types.String `tfsdk:"create_time"`
+ JobType types.String `tfsdk:"job_type"`
+ JobVersion types.String `tfsdk:"job_version"`
+ ModelSnapshotID types.String `tfsdk:"model_snapshot_id"`
+}
+
+// AnalysisConfigTFModel represents the analysis configuration
+type AnalysisConfigTFModel struct {
+ BucketSpan types.String `tfsdk:"bucket_span"`
+ CategorizationFieldName types.String `tfsdk:"categorization_field_name"`
+ CategorizationFilters types.List `tfsdk:"categorization_filters"`
+ Detectors types.List `tfsdk:"detectors"`
+ Influencers types.List `tfsdk:"influencers"`
+ Latency types.String `tfsdk:"latency"`
+ ModelPruneWindow types.String `tfsdk:"model_prune_window"`
+ MultivariateByFields types.Bool `tfsdk:"multivariate_by_fields"`
+ PerPartitionCategorization types.Object `tfsdk:"per_partition_categorization"`
+ SummaryCountFieldName types.String `tfsdk:"summary_count_field_name"`
+}
+
+// DetectorTFModel represents a detector configuration
+type DetectorTFModel struct {
+ ByFieldName types.String `tfsdk:"by_field_name"`
+ DetectorDescription types.String `tfsdk:"detector_description"`
+ ExcludeFrequent types.String `tfsdk:"exclude_frequent"`
+ FieldName types.String `tfsdk:"field_name"`
+ Function types.String `tfsdk:"function"`
+ OverFieldName types.String `tfsdk:"over_field_name"`
+ PartitionFieldName types.String `tfsdk:"partition_field_name"`
+ UseNull types.Bool `tfsdk:"use_null"`
+ CustomRules types.List `tfsdk:"custom_rules"`
+}
+
+// CustomRuleTFModel represents a custom rule configuration
+type CustomRuleTFModel struct {
+ Actions types.List `tfsdk:"actions"`
+ Conditions types.List `tfsdk:"conditions"`
+}
+
+// RuleConditionTFModel represents a rule condition
+type RuleConditionTFModel struct {
+ AppliesTo types.String `tfsdk:"applies_to"`
+ Operator types.String `tfsdk:"operator"`
+ Value types.Float64 `tfsdk:"value"`
+}
+
+// AnalysisLimitsTFModel represents analysis limits configuration
+type AnalysisLimitsTFModel struct {
+ CategorizationExamplesLimit types.Int64 `tfsdk:"categorization_examples_limit"`
+ ModelMemoryLimit types.String `tfsdk:"model_memory_limit"`
+}
+
+// DataDescriptionTFModel represents data description configuration
+type DataDescriptionTFModel struct {
+ FieldDelimiter types.String `tfsdk:"field_delimiter"`
+ Format types.String `tfsdk:"format"`
+ QuoteCharacter types.String `tfsdk:"quote_character"`
+ TimeField types.String `tfsdk:"time_field"`
+ TimeFormat types.String `tfsdk:"time_format"`
+}
+
+// ModelPlotConfigTFModel represents model plot configuration
+type ModelPlotConfigTFModel struct {
+ AnnotationsEnabled types.Bool `tfsdk:"annotations_enabled"`
+ Enabled types.Bool `tfsdk:"enabled"`
+ Terms types.String `tfsdk:"terms"`
+}
+
+// PerPartitionCategorizationTFModel represents per-partition categorization configuration
+type PerPartitionCategorizationTFModel struct {
+ Enabled types.Bool `tfsdk:"enabled"`
+ StopOnWarn types.Bool `tfsdk:"stop_on_warn"`
+}
+
+// ToAPIModel converts TF model to AnomalyDetectionJobAPIModel
+func (plan *AnomalyDetectionJobTFModel) toAPIModel(ctx context.Context) (*AnomalyDetectionJobAPIModel, fwdiags.Diagnostics) {
+ var diags fwdiags.Diagnostics
+
+ apiModel := &AnomalyDetectionJobAPIModel{
+ JobID: plan.JobID.ValueString(),
+ Description: plan.Description.ValueString(),
+ }
+
+ // Convert groups
+ if utils.IsKnown(plan.Groups) {
+ var groups []string
+ d := plan.Groups.ElementsAs(ctx, &groups, false)
+ diags.Append(d...)
+ apiModel.Groups = groups
+ }
+
+ // Convert analysis_config
+ var analysisConfig AnalysisConfigTFModel
+ d := plan.AnalysisConfig.As(ctx, &analysisConfig, basetypes.ObjectAsOptions{})
+ diags.Append(d...)
+ if diags.HasError() {
+ return nil, diags
+ }
+
+ // Convert detectors
+ var detectors []DetectorTFModel
+ d = analysisConfig.Detectors.ElementsAs(ctx, &detectors, false)
+ diags.Append(d...)
+
+ apiDetectors := make([]DetectorAPIModel, len(detectors))
+ for i, detector := range detectors {
+ apiDetectors[i] = DetectorAPIModel{
+ Function: detector.Function.ValueString(),
+ FieldName: detector.FieldName.ValueString(),
+ ByFieldName: detector.ByFieldName.ValueString(),
+ OverFieldName: detector.OverFieldName.ValueString(),
+ PartitionFieldName: detector.PartitionFieldName.ValueString(),
+ DetectorDescription: detector.DetectorDescription.ValueString(),
+ ExcludeFrequent: detector.ExcludeFrequent.ValueString(),
+ }
+ if utils.IsKnown(detector.UseNull) {
+ apiDetectors[i].UseNull = utils.Pointer(detector.UseNull.ValueBool())
+ }
+ }
+
+ // Convert influencers
+ var influencers []string
+ if utils.IsKnown(analysisConfig.Influencers) {
+ d = analysisConfig.Influencers.ElementsAs(ctx, &influencers, false)
+ diags.Append(d...)
+ }
+
+ apiModel.AnalysisConfig = AnalysisConfigAPIModel{
+ BucketSpan: analysisConfig.BucketSpan.ValueString(),
+ CategorizationFieldName: analysisConfig.CategorizationFieldName.ValueString(),
+ Detectors: apiDetectors,
+ Influencers: influencers,
+ Latency: analysisConfig.Latency.ValueString(),
+ ModelPruneWindow: analysisConfig.ModelPruneWindow.ValueString(),
+ SummaryCountFieldName: analysisConfig.SummaryCountFieldName.ValueString(),
+ }
+
+ if utils.IsKnown(analysisConfig.MultivariateByFields) {
+ apiModel.AnalysisConfig.MultivariateByFields = utils.Pointer(analysisConfig.MultivariateByFields.ValueBool())
+ }
+
+ // Convert categorization filters
+ if utils.IsKnown(analysisConfig.CategorizationFilters) {
+ var categorizationFilters []string
+ d = analysisConfig.CategorizationFilters.ElementsAs(ctx, &categorizationFilters, false)
+ diags.Append(d...)
+ apiModel.AnalysisConfig.CategorizationFilters = categorizationFilters
+ }
+
+ // Convert per_partition_categorization
+ if utils.IsKnown(analysisConfig.PerPartitionCategorization) {
+ var perPartitionCategorization PerPartitionCategorizationTFModel
+ d = analysisConfig.PerPartitionCategorization.As(ctx, &perPartitionCategorization, basetypes.ObjectAsOptions{})
+ diags.Append(d...)
+ apiModel.AnalysisConfig.PerPartitionCategorization = &PerPartitionCategorizationAPIModel{
+ Enabled: perPartitionCategorization.Enabled.ValueBool(),
+ }
+ if utils.IsKnown(perPartitionCategorization.StopOnWarn) {
+ apiModel.AnalysisConfig.PerPartitionCategorization.StopOnWarn = utils.Pointer(perPartitionCategorization.StopOnWarn.ValueBool())
+ }
+ }
+
+ // Convert analysis_limits
+ if utils.IsKnown(plan.AnalysisLimits) {
+ var analysisLimits AnalysisLimitsTFModel
+ d = plan.AnalysisLimits.As(ctx, &analysisLimits, basetypes.ObjectAsOptions{})
+ diags.Append(d...)
+ apiModel.AnalysisLimits = &AnalysisLimitsAPIModel{
+ ModelMemoryLimit: analysisLimits.ModelMemoryLimit.ValueString(),
+ }
+ if utils.IsKnown(analysisLimits.CategorizationExamplesLimit) {
+ apiModel.AnalysisLimits.CategorizationExamplesLimit = utils.Pointer(analysisLimits.CategorizationExamplesLimit.ValueInt64())
+ }
+ }
+
+ // Convert data_description
+ var dataDescription DataDescriptionTFModel
+ d = plan.DataDescription.As(ctx, &dataDescription, basetypes.ObjectAsOptions{})
+ diags.Append(d...)
+ apiModel.DataDescription = DataDescriptionAPIModel{
+ TimeField: dataDescription.TimeField.ValueString(),
+ TimeFormat: dataDescription.TimeFormat.ValueString(),
+ Format: dataDescription.Format.ValueString(),
+ FieldDelimiter: dataDescription.FieldDelimiter.ValueString(),
+ QuoteCharacter: dataDescription.QuoteCharacter.ValueString(),
+ }
+
+ // Convert optional fields
+ if utils.IsKnown(plan.AllowLazyOpen) {
+ apiModel.AllowLazyOpen = utils.Pointer(plan.AllowLazyOpen.ValueBool())
+ }
+
+ if utils.IsKnown(plan.BackgroundPersistInterval) {
+ apiModel.BackgroundPersistInterval = plan.BackgroundPersistInterval.ValueString()
+ }
+
+ if utils.IsKnown(plan.CustomSettings) {
+ var customSettings map[string]interface{}
+ if err := json.Unmarshal([]byte(plan.CustomSettings.ValueString()), &customSettings); err != nil {
+ diags.AddError("Failed to parse custom_settings", err.Error())
+ return nil, diags
+ }
+ apiModel.CustomSettings = customSettings
+ }
+
+ if utils.IsKnown(plan.DailyModelSnapshotRetentionAfterDays) {
+ apiModel.DailyModelSnapshotRetentionAfterDays = utils.Pointer(plan.DailyModelSnapshotRetentionAfterDays.ValueInt64())
+ }
+
+ if utils.IsKnown(plan.ModelSnapshotRetentionDays) {
+ apiModel.ModelSnapshotRetentionDays = utils.Pointer(plan.ModelSnapshotRetentionDays.ValueInt64())
+ }
+
+ if utils.IsKnown(plan.RenormalizationWindowDays) {
+ apiModel.RenormalizationWindowDays = utils.Pointer(plan.RenormalizationWindowDays.ValueInt64())
+ }
+
+ if utils.IsKnown(plan.ResultsIndexName) {
+ apiModel.ResultsIndexName = plan.ResultsIndexName.ValueString()
+ }
+
+ if utils.IsKnown(plan.ResultsRetentionDays) {
+ apiModel.ResultsRetentionDays = utils.Pointer(plan.ResultsRetentionDays.ValueInt64())
+ }
+
+ // Convert model_plot_config
+ if utils.IsKnown(plan.ModelPlotConfig) {
+ var modelPlotConfig ModelPlotConfigTFModel
+ d = plan.ModelPlotConfig.As(ctx, &modelPlotConfig, basetypes.ObjectAsOptions{})
+ diags.Append(d...)
+ apiModel.ModelPlotConfig = &ModelPlotConfigAPIModel{
+ Enabled: modelPlotConfig.Enabled.ValueBool(),
+ Terms: modelPlotConfig.Terms.ValueString(),
+ }
+ if utils.IsKnown(modelPlotConfig.AnnotationsEnabled) {
+ apiModel.ModelPlotConfig.AnnotationsEnabled = utils.Pointer(modelPlotConfig.AnnotationsEnabled.ValueBool())
+ }
+ }
+
+ return apiModel, diags
+}
+
+// fromAPIModel populates the TF model from an API response.
+func (tfModel *AnomalyDetectionJobTFModel) fromAPIModel(ctx context.Context, apiModel *AnomalyDetectionJobAPIModel) fwdiags.Diagnostics {
+ var diags fwdiags.Diagnostics
+
+ tfModel.JobID = types.StringValue(apiModel.JobID)
+ tfModel.Description = types.StringValue(apiModel.Description)
+ tfModel.JobType = types.StringValue(apiModel.JobType)
+ tfModel.JobVersion = types.StringValue(apiModel.JobVersion)
+
+	// Convert create_time. JSON numbers decode to float64, which fmt's %v would
+	// render in scientific notation for epoch-millisecond values, so whole-number
+	// timestamps are formatted as integers instead.
+	if createMillis, ok := apiModel.CreateTime.(float64); ok {
+		tfModel.CreateTime = types.StringValue(strconv.FormatInt(int64(createMillis), 10))
+	} else if apiModel.CreateTime != nil {
+		tfModel.CreateTime = types.StringValue(fmt.Sprintf("%v", apiModel.CreateTime))
+	} else {
+		tfModel.CreateTime = types.StringNull()
+	}
+
+ // Convert model_snapshot_id
+ tfModel.ModelSnapshotID = types.StringValue(apiModel.ModelSnapshotID)
+
+ // Convert groups
+ if len(apiModel.Groups) > 0 {
+ groupsSet, d := types.SetValueFrom(ctx, types.StringType, apiModel.Groups)
+ diags.Append(d...)
+ tfModel.Groups = groupsSet
+ } else {
+ tfModel.Groups = types.SetNull(types.StringType)
+ }
+
+ // Convert optional fields
+ tfModel.AllowLazyOpen = types.BoolPointerValue(apiModel.AllowLazyOpen)
+
+	if apiModel.BackgroundPersistInterval != "" {
+		tfModel.BackgroundPersistInterval = types.StringValue(apiModel.BackgroundPersistInterval)
+	} else {
+		tfModel.BackgroundPersistInterval = types.StringNull()
+	}
+
+ if apiModel.CustomSettings != nil {
+ customSettingsJSON, err := json.Marshal(apiModel.CustomSettings)
+ if err != nil {
+ diags.AddError("Failed to marshal custom_settings", err.Error())
+ return diags
+ }
+ tfModel.CustomSettings = jsontypes.NewNormalizedValue(string(customSettingsJSON))
+ } else {
+ tfModel.CustomSettings = jsontypes.NewNormalizedNull()
+ }
+
+ tfModel.DailyModelSnapshotRetentionAfterDays = types.Int64PointerValue(apiModel.DailyModelSnapshotRetentionAfterDays)
+
+ tfModel.ModelSnapshotRetentionDays = types.Int64PointerValue(apiModel.ModelSnapshotRetentionDays)
+
+	tfModel.RenormalizationWindowDays = types.Int64PointerValue(apiModel.RenormalizationWindowDays)
+
+	if apiModel.ResultsIndexName != "" {
+		tfModel.ResultsIndexName = types.StringValue(apiModel.ResultsIndexName)
+	} else {
+		tfModel.ResultsIndexName = types.StringNull()
+	}
+
+ tfModel.ResultsRetentionDays = types.Int64PointerValue(apiModel.ResultsRetentionDays)
+
+ // Convert analysis_config
+ tfModel.AnalysisConfig = tfModel.convertAnalysisConfigFromAPI(ctx, &apiModel.AnalysisConfig, &diags)
+
+ // Convert analysis_limits
+ tfModel.AnalysisLimits = tfModel.convertAnalysisLimitsFromAPI(ctx, apiModel.AnalysisLimits, &diags)
+
+ // Convert data_description
+ tfModel.DataDescription = tfModel.convertDataDescriptionFromAPI(ctx, &apiModel.DataDescription, &diags)
+
+ // Convert model_plot_config
+ tfModel.ModelPlotConfig = tfModel.convertModelPlotConfigFromAPI(ctx, apiModel.ModelPlotConfig, &diags)
+
+ return diags
+}
+
+// Helper functions for schema attribute types
+// Conversion helper methods
+func (tfModel *AnomalyDetectionJobTFModel) convertAnalysisConfigFromAPI(ctx context.Context, apiConfig *AnalysisConfigAPIModel, diags *fwdiags.Diagnostics) types.Object {
+ if apiConfig == nil || apiConfig.BucketSpan == "" {
+ return types.ObjectNull(getAnalysisConfigAttrTypes())
+ }
+
+ analysisConfigTF := AnalysisConfigTFModel{
+ BucketSpan: types.StringValue(apiConfig.BucketSpan),
+ }
+
+ // Convert optional string fields
+ if apiConfig.CategorizationFieldName != "" {
+ analysisConfigTF.CategorizationFieldName = types.StringValue(apiConfig.CategorizationFieldName)
+ } else {
+ analysisConfigTF.CategorizationFieldName = types.StringNull()
+ }
+
+ if apiConfig.Latency != "" {
+ analysisConfigTF.Latency = types.StringValue(apiConfig.Latency)
+ } else {
+ analysisConfigTF.Latency = types.StringNull()
+ }
+
+ if apiConfig.ModelPruneWindow != "" {
+ analysisConfigTF.ModelPruneWindow = types.StringValue(apiConfig.ModelPruneWindow)
+ } else {
+ analysisConfigTF.ModelPruneWindow = types.StringNull()
+ }
+
+ if apiConfig.SummaryCountFieldName != "" {
+ analysisConfigTF.SummaryCountFieldName = types.StringValue(apiConfig.SummaryCountFieldName)
+ } else {
+ analysisConfigTF.SummaryCountFieldName = types.StringNull()
+ }
+
+ // Convert boolean fields
+ analysisConfigTF.MultivariateByFields = types.BoolPointerValue(apiConfig.MultivariateByFields)
+
+ // Convert categorization filters
+ if len(apiConfig.CategorizationFilters) > 0 {
+ categorizationFiltersListValue, d := types.ListValueFrom(ctx, types.StringType, apiConfig.CategorizationFilters)
+ diags.Append(d...)
+ analysisConfigTF.CategorizationFilters = categorizationFiltersListValue
+ } else {
+ analysisConfigTF.CategorizationFilters = types.ListNull(types.StringType)
+ }
+
+ // Convert influencers
+ if len(apiConfig.Influencers) > 0 {
+ influencersListValue, d := types.ListValueFrom(ctx, types.StringType, apiConfig.Influencers)
+ diags.Append(d...)
+ analysisConfigTF.Influencers = influencersListValue
+ } else {
+ analysisConfigTF.Influencers = types.ListNull(types.StringType)
+ }
+
+ // Convert detectors
+ if len(apiConfig.Detectors) > 0 {
+ detectorsTF := make([]DetectorTFModel, len(apiConfig.Detectors))
+ for i, detector := range apiConfig.Detectors {
+ detectorsTF[i] = DetectorTFModel{
+ Function: types.StringValue(detector.Function),
+ }
+
+ // Convert optional string fields
+ if detector.FieldName != "" {
+ detectorsTF[i].FieldName = types.StringValue(detector.FieldName)
+ } else {
+ detectorsTF[i].FieldName = types.StringNull()
+ }
+
+ if detector.ByFieldName != "" {
+ detectorsTF[i].ByFieldName = types.StringValue(detector.ByFieldName)
+ } else {
+ detectorsTF[i].ByFieldName = types.StringNull()
+ }
+
+ if detector.OverFieldName != "" {
+ detectorsTF[i].OverFieldName = types.StringValue(detector.OverFieldName)
+ } else {
+ detectorsTF[i].OverFieldName = types.StringNull()
+ }
+
+ if detector.PartitionFieldName != "" {
+ detectorsTF[i].PartitionFieldName = types.StringValue(detector.PartitionFieldName)
+ } else {
+ detectorsTF[i].PartitionFieldName = types.StringNull()
+ }
+
+ if detector.DetectorDescription != "" {
+ detectorsTF[i].DetectorDescription = types.StringValue(detector.DetectorDescription)
+ } else {
+ detectorsTF[i].DetectorDescription = types.StringNull()
+ }
+
+ if detector.ExcludeFrequent != "" {
+ detectorsTF[i].ExcludeFrequent = types.StringValue(detector.ExcludeFrequent)
+ } else {
+ detectorsTF[i].ExcludeFrequent = types.StringNull()
+ }
+
+ // Convert boolean field
+ detectorsTF[i].UseNull = types.BoolPointerValue(detector.UseNull)
+
+ // Convert custom rules
+ if len(detector.CustomRules) > 0 {
+ customRulesTF := make([]CustomRuleTFModel, len(detector.CustomRules))
+ for j, rule := range detector.CustomRules {
+ // Convert actions
+ if len(rule.Actions) > 0 {
+ // Convert interface{} actions to strings, skipping any non-string values
+ // instead of leaving empty placeholders in the list
+ actions := make([]string, 0, len(rule.Actions))
+ for _, action := range rule.Actions {
+ if actionStr, ok := action.(string); ok {
+ actions = append(actions, actionStr)
+ }
+ }
+ actionsListValue, d := types.ListValueFrom(ctx, types.StringType, actions)
+ diags.Append(d...)
+ customRulesTF[j].Actions = actionsListValue
+ } else {
+ customRulesTF[j].Actions = types.ListNull(types.StringType)
+ }
+
+ // Convert conditions
+ if len(rule.Conditions) > 0 {
+ conditionsTF := make([]RuleConditionTFModel, len(rule.Conditions))
+ for k, condition := range rule.Conditions {
+ conditionsTF[k] = RuleConditionTFModel{
+ AppliesTo: types.StringValue(condition.AppliesTo),
+ Operator: types.StringValue(condition.Operator),
+ Value: types.Float64Value(condition.Value),
+ }
+ }
+ conditionsListValue, d := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: getRuleConditionAttrTypes()}, conditionsTF)
+ diags.Append(d...)
+ customRulesTF[j].Conditions = conditionsListValue
+ } else {
+ customRulesTF[j].Conditions = types.ListNull(types.ObjectType{AttrTypes: getRuleConditionAttrTypes()})
+ }
+ }
+ customRulesListValue, d := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: getCustomRuleAttrTypes()}, customRulesTF)
+ diags.Append(d...)
+ detectorsTF[i].CustomRules = customRulesListValue
+ } else {
+ detectorsTF[i].CustomRules = types.ListNull(types.ObjectType{AttrTypes: getCustomRuleAttrTypes()})
+ }
+ }
+ detectorsListValue, d := types.ListValueFrom(ctx, types.ObjectType{AttrTypes: getDetectorAttrTypes()}, detectorsTF)
+ diags.Append(d...)
+ analysisConfigTF.Detectors = detectorsListValue
+ } else {
+ analysisConfigTF.Detectors = types.ListNull(types.ObjectType{AttrTypes: getDetectorAttrTypes()})
+ }
+
+ // Convert per_partition_categorization
+ if apiConfig.PerPartitionCategorization != nil {
+ perPartitionCategorizationTF := PerPartitionCategorizationTFModel{
+ Enabled: types.BoolValue(apiConfig.PerPartitionCategorization.Enabled),
+ }
+ perPartitionCategorizationTF.StopOnWarn = types.BoolPointerValue(apiConfig.PerPartitionCategorization.StopOnWarn)
+ perPartitionCategorizationObjectValue, d := types.ObjectValueFrom(ctx, getPerPartitionCategorizationAttrTypes(), perPartitionCategorizationTF)
+ diags.Append(d...)
+ analysisConfigTF.PerPartitionCategorization = perPartitionCategorizationObjectValue
+ } else {
+ analysisConfigTF.PerPartitionCategorization = types.ObjectNull(getPerPartitionCategorizationAttrTypes())
+ }
+
+ analysisConfigObjectValue, d := types.ObjectValueFrom(ctx, getAnalysisConfigAttrTypes(), analysisConfigTF)
+ diags.Append(d...)
+ return analysisConfigObjectValue
+}
+
+func (tfModel *AnomalyDetectionJobTFModel) convertDataDescriptionFromAPI(ctx context.Context, apiDataDescription *DataDescriptionAPIModel, diags *fwdiags.Diagnostics) types.Object {
+ if apiDataDescription == nil {
+ return types.ObjectNull(getDataDescriptionAttrTypes())
+ }
+
+ dataDescriptionTF := DataDescriptionTFModel{}
+
+ if apiDataDescription.TimeField != "" {
+ dataDescriptionTF.TimeField = types.StringValue(apiDataDescription.TimeField)
+ } else {
+ dataDescriptionTF.TimeField = types.StringNull()
+ }
+
+ if apiDataDescription.TimeFormat != "" {
+ dataDescriptionTF.TimeFormat = types.StringValue(apiDataDescription.TimeFormat)
+ } else {
+ dataDescriptionTF.TimeFormat = types.StringNull()
+ }
+
+ if apiDataDescription.Format != "" {
+ dataDescriptionTF.Format = types.StringValue(apiDataDescription.Format)
+ } else {
+ dataDescriptionTF.Format = types.StringNull()
+ }
+
+ if apiDataDescription.FieldDelimiter != "" {
+ dataDescriptionTF.FieldDelimiter = types.StringValue(apiDataDescription.FieldDelimiter)
+ } else {
+ dataDescriptionTF.FieldDelimiter = types.StringNull()
+ }
+
+ if apiDataDescription.QuoteCharacter != "" {
+ dataDescriptionTF.QuoteCharacter = types.StringValue(apiDataDescription.QuoteCharacter)
+ } else {
+ dataDescriptionTF.QuoteCharacter = types.StringNull()
+ }
+
+ dataDescriptionObjectValue, d := types.ObjectValueFrom(ctx, getDataDescriptionAttrTypes(), dataDescriptionTF)
+ diags.Append(d...)
+ return dataDescriptionObjectValue
+}
+
+func (tfModel *AnomalyDetectionJobTFModel) convertAnalysisLimitsFromAPI(ctx context.Context, apiLimits *AnalysisLimitsAPIModel, diags *fwdiags.Diagnostics) types.Object {
+ if apiLimits == nil {
+ return types.ObjectNull(getAnalysisLimitsAttrTypes())
+ }
+
+ analysisLimitsTF := AnalysisLimitsTFModel{
+ CategorizationExamplesLimit: types.Int64PointerValue(apiLimits.CategorizationExamplesLimit),
+ }
+
+ if apiLimits.ModelMemoryLimit != "" {
+ analysisLimitsTF.ModelMemoryLimit = types.StringValue(apiLimits.ModelMemoryLimit)
+ } else {
+ analysisLimitsTF.ModelMemoryLimit = types.StringNull()
+ }
+
+ analysisLimitsObjectValue, d := types.ObjectValueFrom(ctx, getAnalysisLimitsAttrTypes(), analysisLimitsTF)
+ diags.Append(d...)
+ return analysisLimitsObjectValue
+}
+
+func (tfModel *AnomalyDetectionJobTFModel) convertModelPlotConfigFromAPI(ctx context.Context, apiModelPlotConfig *ModelPlotConfigAPIModel, diags *fwdiags.Diagnostics) types.Object {
+ if apiModelPlotConfig == nil {
+ return types.ObjectNull(getModelPlotConfigAttrTypes())
+ }
+
+ modelPlotConfigTF := ModelPlotConfigTFModel{
+ Enabled: types.BoolValue(apiModelPlotConfig.Enabled),
+ }
+
+ if apiModelPlotConfig.Terms != "" {
+ modelPlotConfigTF.Terms = types.StringValue(apiModelPlotConfig.Terms)
+ } else {
+ modelPlotConfigTF.Terms = types.StringNull()
+ }
+
+ modelPlotConfigTF.AnnotationsEnabled = types.BoolPointerValue(apiModelPlotConfig.AnnotationsEnabled)
+
+ modelPlotConfigObjectValue, d := types.ObjectValueFrom(ctx, getModelPlotConfigAttrTypes(), modelPlotConfigTF)
+ diags.Append(d...)
+ return modelPlotConfigObjectValue
+}
diff --git a/internal/elasticsearch/ml/anomaly_detection_job/read.go b/internal/elasticsearch/ml/anomaly_detection_job/read.go
new file mode 100644
index 000000000..7dcdebecb
--- /dev/null
+++ b/internal/elasticsearch/ml/anomaly_detection_job/read.go
@@ -0,0 +1,68 @@
+package anomaly_detection_job
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/diagutil"
+ fwdiags "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+func (r *anomalyDetectionJobResource) read(ctx context.Context, job *AnomalyDetectionJobTFModel) (bool, fwdiags.Diagnostics) {
+ var diags fwdiags.Diagnostics
+
+ if !r.resourceReady(&diags) {
+ return false, diags
+ }
+
+ jobID := job.JobID.ValueString()
+ tflog.Debug(ctx, fmt.Sprintf("Reading ML anomaly detection job: %s", jobID))
+
+ esClient, err := r.client.GetESClient()
+ if err != nil {
+ diags.AddError("Failed to get Elasticsearch client", err.Error())
+ return false, diags
+ }
+
+ // Get the ML job
+ res, err := esClient.ML.GetJobs(esClient.ML.GetJobs.WithJobID(jobID), esClient.ML.GetJobs.WithContext(ctx))
+ if err != nil {
+ diags.AddError("Failed to get ML anomaly detection job", err.Error())
+ return false, diags
+ }
+ defer res.Body.Close()
+
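+ // A 404 means the job no longer exists; report not-found so the caller can remove it from state.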
+ if res.StatusCode == http.StatusNotFound {
+ return false, nil
+ }
+
+ if d := diagutil.CheckErrorFromFW(res, fmt.Sprintf("Unable to get ML anomaly detection job: %s", jobID)); d.HasError() {
+ diags.Append(d...)
+ return false, diags
+ }
+
+ // Parse the response
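+ // (the get anomaly detection jobs API returns {"count": N, "jobs": [...]})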
+ var response struct {
+ Jobs []AnomalyDetectionJobAPIModel `json:"jobs"`
+ }
+ if err := json.NewDecoder(res.Body).Decode(&response); err != nil {
+ diags.AddError("Failed to decode job response", err.Error())
+ return false, diags
+ }
+
+ if len(response.Jobs) == 0 {
+ return false, nil
+ }
+
+ // Convert API response back to TF model
+ diags.Append(job.fromAPIModel(ctx, &response.Jobs[0])...)
+ if diags.HasError() {
+ return false, diags
+ }
+
+ tflog.Debug(ctx, fmt.Sprintf("Successfully read ML anomaly detection job: %s", jobID))
+ return true, diags
+}
diff --git a/internal/elasticsearch/ml/anomaly_detection_job/resource.go b/internal/elasticsearch/ml/anomaly_detection_job/resource.go
new file mode 100644
index 000000000..d26c4f2b0
--- /dev/null
+++ b/internal/elasticsearch/ml/anomaly_detection_job/resource.go
@@ -0,0 +1,75 @@
+package anomaly_detection_job
+
+import (
+ "context"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/clients"
+ fwdiags "github.com/hashicorp/terraform-plugin-framework/diag"
+ "github.com/hashicorp/terraform-plugin-framework/path"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+)
+
+func NewAnomalyDetectionJobResource() resource.Resource {
+ return &anomalyDetectionJobResource{}
+}
+
+type anomalyDetectionJobResource struct {
+ client *clients.ApiClient
+}
+
+func (r *anomalyDetectionJobResource) Metadata(_ context.Context, req resource.MetadataRequest, resp *resource.MetadataResponse) {
+ resp.TypeName = req.ProviderTypeName + "_elasticsearch_ml_anomaly_detection_job"
+}
+
+func (r *anomalyDetectionJobResource) Configure(_ context.Context, req resource.ConfigureRequest, resp *resource.ConfigureResponse) {
+ client, diags := clients.ConvertProviderData(req.ProviderData)
+ resp.Diagnostics.Append(diags...)
+ r.client = client
+}
+
+func (r *anomalyDetectionJobResource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) {
+ r.create(ctx, req, resp)
+}
+
+func (r *anomalyDetectionJobResource) Read(ctx context.Context, req resource.ReadRequest, resp *resource.ReadResponse) {
+ var state AnomalyDetectionJobTFModel
+ diags := req.State.Get(ctx, &state)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ found, diags := r.read(ctx, &state)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ if !found {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+
+ resp.Diagnostics.Append(resp.State.Set(ctx, &state)...)
+}
+
+func (r *anomalyDetectionJobResource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+ r.update(ctx, req, resp)
+}
+
+func (r *anomalyDetectionJobResource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) {
+ r.delete(ctx, req, resp)
+}
+
+// resourceReady checks if the client is ready for API calls
+func (r *anomalyDetectionJobResource) resourceReady(diags *fwdiags.Diagnostics) bool {
+ if r.client == nil {
+ diags.AddError("Client not configured", "Provider client is not configured")
+ return false
+ }
+ return true
+}
+
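+// ImportState passes the supplied import ID through to the "id" attribute;
+// a subsequent Read populates the remaining state.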
+func (r *anomalyDetectionJobResource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) {
+ resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp)
+}
diff --git a/internal/elasticsearch/ml/anomaly_detection_job/schema.go b/internal/elasticsearch/ml/anomaly_detection_job/schema.go
new file mode 100644
index 000000000..203aae649
--- /dev/null
+++ b/internal/elasticsearch/ml/anomaly_detection_job/schema.go
@@ -0,0 +1,456 @@
+package anomaly_detection_job
+
+import (
+ "context"
+ "regexp"
+
+ "github.com/hashicorp/terraform-plugin-framework-jsontypes/jsontypes"
+ "github.com/hashicorp/terraform-plugin-framework-validators/int64validator"
+ "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator"
+ "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator"
+ "github.com/hashicorp/terraform-plugin-framework/attr"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/boolplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/int64planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/objectplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/planmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/resource/schema/stringplanmodifier"
+ "github.com/hashicorp/terraform-plugin-framework/schema/validator"
+ "github.com/hashicorp/terraform-plugin-framework/types"
+
+ providerschema "github.com/elastic/terraform-provider-elasticstack/internal/schema"
+)
+
+func (r *anomalyDetectionJobResource) Schema(_ context.Context, _ resource.SchemaRequest, resp *resource.SchemaResponse) {
+ resp.Schema = GetSchema()
+}
+
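+// GetSchema returns the resource schema; it also serves as the single source
+// of truth for the attr-type helper functions at the bottom of this file.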
+func GetSchema() schema.Schema {
+ return schema.Schema{
+ MarkdownDescription: "Creates and manages Machine Learning anomaly detection jobs. See the [ML Job API documentation](https://www.elastic.co/guide/en/elasticsearch/reference/current/ml-put-job.html) for more details.",
+ Blocks: map[string]schema.Block{
+ "elasticsearch_connection": providerschema.GetEsFWConnectionBlock("elasticsearch_connection", false),
+ },
+ Attributes: map[string]schema.Attribute{
+ "id": schema.StringAttribute{
+ MarkdownDescription: "Internal identifier of the resource",
+ Computed: true,
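+ // UseStateForUnknown keeps this computed value stable across plans once it
+ // is known, avoiding perpetual "(known after apply)" churn.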
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "job_id": schema.StringAttribute{
+ MarkdownDescription: "The identifier for the anomaly detection job. This identifier can contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters.",
+ Required: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ },
+ Validators: []validator.String{
+ stringvalidator.LengthBetween(1, 64),
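+ // The second alternation accepts single-character job IDs, which the first pattern would reject.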
+ stringvalidator.RegexMatches(regexp.MustCompile(`^[a-z0-9][a-z0-9_-]*[a-z0-9]$|^[a-z0-9]$`), "must contain lowercase alphanumeric characters (a-z and 0-9), hyphens, and underscores. It must start and end with alphanumeric characters"),
+ },
+ },
+ "description": schema.StringAttribute{
+ MarkdownDescription: "A description of the job.",
+ Optional: true,
+ },
+ "groups": schema.SetAttribute{
+ MarkdownDescription: "A set of job groups. A job can belong to no groups or many.",
+ Optional: true,
+ ElementType: types.StringType,
+ },
+ "analysis_config": schema.SingleNestedAttribute{
+ MarkdownDescription: "Specifies how to analyze the data. After you create a job, you cannot change the analysis configuration; all the properties are informational.",
+ Required: true,
+ PlanModifiers: []planmodifier.Object{
+ objectplanmodifier.RequiresReplace(),
+ },
+ Attributes: map[string]schema.Attribute{
+ "bucket_span": schema.StringAttribute{
+ MarkdownDescription: "The size of the interval that the analysis is aggregated into, typically between 15m and 1h. If the anomaly detector is expecting to see data at near real-time frequency, then the bucket_span should be set to a value around 10 times the time between ingested documents. For example, if data comes every second, bucket_span should be 10s; if data comes every 5 minutes, bucket_span should be 50m. For sparse or batch data, use larger bucket_span values.",
+ Required: true,
+ Validators: []validator.String{
+ stringvalidator.RegexMatches(regexp.MustCompile(`^\d+(ms|micros|nanos|[smhd])$`), "must be a valid time interval (e.g., 15m, 1h)"),
+ },
+ },
+ "categorization_field_name": schema.StringAttribute{
+ MarkdownDescription: "For categorization jobs only. The name of the field to categorize.",
+ Optional: true,
+ },
+ "categorization_filters": schema.ListAttribute{
+ MarkdownDescription: "For categorization jobs only. An array of regular expressions. A categorization message is matched against each regex in the order they are listed in the array.",
+ Optional: true,
+ ElementType: types.StringType,
+ },
+ "detectors": schema.ListNestedAttribute{
+ MarkdownDescription: "Detector configuration objects. Detectors identify the anomaly detection functions and the fields on which they operate.",
+ Required: true,
+ Validators: []validator.List{
+ listvalidator.SizeAtLeast(1),
+ },
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "function": schema.StringAttribute{
+ MarkdownDescription: "The analysis function that is used. For example, count, rare, mean, min, max, sum.",
+ Required: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("count", "high_count", "low_count", "non_zero_count", "high_non_zero_count", "low_non_zero_count", "distinct_count", "high_distinct_count", "low_distinct_count", "info_content", "high_info_content", "low_info_content", "min", "max", "median", "high_median", "low_median", "mean", "high_mean", "low_mean", "metric", "varp", "high_varp", "low_varp", "sum", "high_sum", "low_sum", "non_null_sum", "high_non_null_sum", "low_non_null_sum", "rare", "freq_rare", "time_of_day", "time_of_week", "lat_long"),
+ },
+ },
+ "field_name": schema.StringAttribute{
+ MarkdownDescription: "The field that the detector function analyzes. Some functions require a field. Functions that don't require a field are count, rare, and freq_rare.",
+ Optional: true,
+ },
+ "by_field_name": schema.StringAttribute{
+ MarkdownDescription: "The field used to split the data. In particular, this property is used for analyzing the splits with respect to their own history. It is used for finding unusual values in the context of the split.",
+ Optional: true,
+ },
+ "over_field_name": schema.StringAttribute{
+ MarkdownDescription: "The field used to split the data. In particular, this property is used for analyzing the splits with respect to the history of all splits. It is used for finding unusual values in the population of all splits.",
+ Optional: true,
+ },
+ "partition_field_name": schema.StringAttribute{
+ MarkdownDescription: "The field used to segment the analysis. When you use this property, you have completely independent baselines for each value of this field.",
+ Optional: true,
+ },
+ "detector_description": schema.StringAttribute{
+ MarkdownDescription: "A description of the detector.",
+ Optional: true,
+ },
+ "exclude_frequent": schema.StringAttribute{
+ MarkdownDescription: "Contains one of the following values: all, none, by, or over.",
+ Optional: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("all", "none", "by", "over"),
+ },
+ },
+ "use_null": schema.BoolAttribute{
+ MarkdownDescription: "Defines whether a new series is used as the null series when there is no value for the by or partition fields.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "custom_rules": schema.ListNestedAttribute{
+ MarkdownDescription: "Custom rules enable you to customize the way detectors operate.",
+ Optional: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "actions": schema.ListAttribute{
+ MarkdownDescription: "The set of actions to be triggered when the rule applies. If more than one action is specified the effects of all actions are combined.",
+ Optional: true,
+ ElementType: types.StringType,
+ Validators: []validator.List{
+ listvalidator.ValueStringsAre(
+ stringvalidator.OneOf("skip_result", "skip_model_update"),
+ ),
+ },
+ },
+ "conditions": schema.ListNestedAttribute{
+ MarkdownDescription: "An array of numeric conditions when the rule applies.",
+ Optional: true,
+ NestedObject: schema.NestedAttributeObject{
+ Attributes: map[string]schema.Attribute{
+ "applies_to": schema.StringAttribute{
+ MarkdownDescription: "Specifies the result property to which the condition applies.",
+ Required: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("actual", "typical", "diff_from_typical", "time"),
+ },
+ },
+ "operator": schema.StringAttribute{
+ MarkdownDescription: "Specifies the condition operator.",
+ Required: true,
+ Validators: []validator.String{
+ stringvalidator.OneOf("gt", "gte", "lt", "lte"),
+ },
+ },
+ "value": schema.Float64Attribute{
+ MarkdownDescription: "The value that is compared against the applies_to field using the operator.",
+ Required: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "influencers": schema.ListAttribute{
+ MarkdownDescription: "A comma separated list of influencer field names. Typically these can be the by, over, or partition fields that are used in the detector configuration.",
+ Optional: true,
+ ElementType: types.StringType,
+ },
+ "latency": schema.StringAttribute{
+ MarkdownDescription: "The size of the window in which to expect data that is out of time order. If you specify a non-zero value, it must be greater than or equal to one second.",
+ Optional: true,
+ },
+ "model_prune_window": schema.StringAttribute{
+ MarkdownDescription: "Advanced configuration option. The time interval (in days) between pruning the model.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "multivariate_by_fields": schema.BoolAttribute{
+ MarkdownDescription: "This functionality is reserved for internal use. It is not supported for use in customer environments and is not subject to the support SLA of official GA features.",
+ Optional: true,
+ },
+ "per_partition_categorization": schema.SingleNestedAttribute{
+ MarkdownDescription: "Settings related to how categorization interacts with partition fields.",
+ Optional: true,
+ Attributes: map[string]schema.Attribute{
+ "enabled": schema.BoolAttribute{
+ MarkdownDescription: "To enable this setting, you must also set the partition_field_name property to the same value in every detector that uses the keyword mlcategory. Otherwise, job creation fails.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "stop_on_warn": schema.BoolAttribute{
+ MarkdownDescription: "This setting can be set to true only if per-partition categorization is enabled.",
+ Optional: true,
+ },
+ },
+ },
+ "summary_count_field_name": schema.StringAttribute{
+ MarkdownDescription: "If this property is specified, the data that is fed to the job is expected to be pre-summarized.",
+ Optional: true,
+ },
+ },
+ },
+ "analysis_limits": schema.SingleNestedAttribute{
+ MarkdownDescription: "Limits can be applied for the resources required to hold the mathematical models in memory.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Object{
+ objectplanmodifier.UseStateForUnknown(),
+ },
+ Attributes: map[string]schema.Attribute{
+ "categorization_examples_limit": schema.Int64Attribute{
+ MarkdownDescription: "The maximum number of examples stored per category in memory and in the results data store.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Int64{
+ int64planmodifier.UseStateForUnknown(),
+ },
+ Validators: []validator.Int64{
+ int64validator.AtLeast(0),
+ },
+ },
+ "model_memory_limit": schema.StringAttribute{
+ MarkdownDescription: "The approximate maximum amount of memory resources that are required for analytical processing.",
+ Optional: true,
+ Validators: []validator.String{
+ stringvalidator.RegexMatches(regexp.MustCompile(`^\d+[kmgtKMGT]?[bB]?$`), "must be a valid memory size (e.g., 10mb, 1gb)"),
+ },
+ },
+ },
+ },
+ "data_description": schema.SingleNestedAttribute{
+ MarkdownDescription: "Defines the format of the input data when you send data to the job by using the post data API.",
+ Required: true,
+ PlanModifiers: []planmodifier.Object{
+ objectplanmodifier.RequiresReplace(),
+ },
+ Attributes: map[string]schema.Attribute{
+ "time_field": schema.StringAttribute{
+ MarkdownDescription: "The name of the field that contains the timestamp.",
+ Optional: true,
+ },
+ "time_format": schema.StringAttribute{
+ MarkdownDescription: "The time format, which can be epoch, epoch_ms, or a custom pattern.",
+ Optional: true,
+ },
+ "format": schema.StringAttribute{
+ MarkdownDescription: "Only JSON format is supported at this time.",
+ Optional: true,
+ Computed: true,
+ Validators: []validator.String{
+ // TODO(TB): if only JSON is supported, why do we allow "delimited" here?
+ stringvalidator.OneOf("json", "delimited"),
+ },
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "field_delimiter": schema.StringAttribute{
+ MarkdownDescription: "The character used to delimit fields in the data. Only applicable when format is delimited.",
+ Optional: true,
+ },
+ "quote_character": schema.StringAttribute{
+ MarkdownDescription: "The character used to quote fields in the data. Only applicable when format is delimited.",
+ Optional: true,
+ },
+ },
+ },
+ "model_plot_config": schema.SingleNestedAttribute{
+ MarkdownDescription: "This advanced configuration option stores model information along with the results. It provides a more detailed view into anomaly detection.",
+ Optional: true,
+ Attributes: map[string]schema.Attribute{
+ "enabled": schema.BoolAttribute{
+ MarkdownDescription: "If true, enables calculation and storage of the model bounds for each entity that is being analyzed.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "annotations_enabled": schema.BoolAttribute{
+ MarkdownDescription: "If true, enables calculation and storage of the model change annotations for each entity that is being analyzed.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "terms": schema.StringAttribute{
+ MarkdownDescription: "Limits data collection to this comma separated list of partition or by field values. If terms are not specified or it is an empty string, no filtering is applied.",
+ Optional: true,
+ },
+ },
+ },
+ "allow_lazy_open": schema.BoolAttribute{
+ MarkdownDescription: "Advanced configuration option. Specifies whether this job can open when there is insufficient machine learning node capacity for it to be immediately assigned to a node.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Bool{
+ boolplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "background_persist_interval": schema.StringAttribute{
+ MarkdownDescription: "Advanced configuration option. The time between each periodic persistence of the model.",
+ Optional: true,
+ },
+ "custom_settings": schema.StringAttribute{
+ MarkdownDescription: "Advanced configuration option. Contains custom meta data about the job. For example, it can contain custom URL information.",
+ Optional: true,
+ CustomType: jsontypes.NormalizedType{},
+ },
+ "daily_model_snapshot_retention_after_days": schema.Int64Attribute{
+ MarkdownDescription: "Advanced configuration option, which affects the automatic removal of old model snapshots for this job.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Int64{
+ int64planmodifier.UseStateForUnknown(),
+ },
+ Validators: []validator.Int64{
+ int64validator.AtLeast(0),
+ },
+ },
+ "model_snapshot_retention_days": schema.Int64Attribute{
+ MarkdownDescription: "Advanced configuration option, which affects the automatic removal of old model snapshots for this job.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.Int64{
+ int64planmodifier.UseStateForUnknown(),
+ },
+ Validators: []validator.Int64{
+ int64validator.AtLeast(0),
+ },
+ },
+ "renormalization_window_days": schema.Int64Attribute{
+ MarkdownDescription: "Advanced configuration option. The period over which adjustments to the score are applied, as new data is seen.",
+ Optional: true,
+ Validators: []validator.Int64{
+ int64validator.AtLeast(0),
+ },
+ },
+ "results_index_name": schema.StringAttribute{
+ MarkdownDescription: "A text string that affects the name of the machine learning results index.",
+ Optional: true,
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.RequiresReplace(),
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "results_retention_days": schema.Int64Attribute{
+ MarkdownDescription: "Advanced configuration option. The period of time (in days) that results are retained.",
+ Optional: true,
+ Validators: []validator.Int64{
+ int64validator.AtLeast(0),
+ },
+ },
+
+ // Read-only computed attributes
+ "create_time": schema.StringAttribute{
+ MarkdownDescription: "The time the job was created.",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "job_type": schema.StringAttribute{
+ MarkdownDescription: "Reserved for future use, currently set to anomaly_detector.",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "job_version": schema.StringAttribute{
+ MarkdownDescription: "The version of Elasticsearch when the job was created.",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ "model_snapshot_id": schema.StringAttribute{
+ MarkdownDescription: "A numerical character string that uniquely identifies the model snapshot.",
+ Computed: true,
+ PlanModifiers: []planmodifier.String{
+ stringplanmodifier.UseStateForUnknown(),
+ },
+ },
+ },
+ }
+}
+
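+// The helpers below derive attr.Type maps from the canonical schema, keeping the
+// API-to-state conversion code in sync with schema changes. Each call re-evaluates
+// GetSchema(), which is acceptable for infrequent CRUD operations.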
+func getAnalysisConfigAttrTypes() map[string]attr.Type {
+ return GetSchema().Attributes["analysis_config"].GetType().(attr.TypeWithAttributeTypes).AttributeTypes()
+}
+
+func getDetectorAttrTypes() map[string]attr.Type {
+ analysisConfigAttrs := getAnalysisConfigAttrTypes()
+ detectorsList := analysisConfigAttrs["detectors"].(types.ListType)
+ detectorsObj := detectorsList.ElemType.(types.ObjectType)
+ return detectorsObj.AttrTypes
+}
+
+func getCustomRuleAttrTypes() map[string]attr.Type {
+ detectorAttrs := getDetectorAttrTypes()
+ customRulesList := detectorAttrs["custom_rules"].(types.ListType)
+ customRulesObj := customRulesList.ElemType.(types.ObjectType)
+ return customRulesObj.AttrTypes
+}
+
+func getRuleConditionAttrTypes() map[string]attr.Type {
+ customRuleAttrs := getCustomRuleAttrTypes()
+ conditionsList := customRuleAttrs["conditions"].(types.ListType)
+ conditionsObj := conditionsList.ElemType.(types.ObjectType)
+ return conditionsObj.AttrTypes
+}
+
+func getPerPartitionCategorizationAttrTypes() map[string]attr.Type {
+ analysisConfigAttrs := getAnalysisConfigAttrTypes()
+ perPartitionObj := analysisConfigAttrs["per_partition_categorization"].(types.ObjectType)
+ return perPartitionObj.AttrTypes
+}
+
+func getAnalysisLimitsAttrTypes() map[string]attr.Type {
+ return GetSchema().Attributes["analysis_limits"].GetType().(attr.TypeWithAttributeTypes).AttributeTypes()
+}
+
+func getDataDescriptionAttrTypes() map[string]attr.Type {
+ return GetSchema().Attributes["data_description"].GetType().(attr.TypeWithAttributeTypes).AttributeTypes()
+}
+
+func getModelPlotConfigAttrTypes() map[string]attr.Type {
+ return GetSchema().Attributes["model_plot_config"].GetType().(attr.TypeWithAttributeTypes).AttributeTypes()
+}
diff --git a/internal/elasticsearch/ml/anomaly_detection_job/update.go b/internal/elasticsearch/ml/anomaly_detection_job/update.go
new file mode 100644
index 000000000..715df169e
--- /dev/null
+++ b/internal/elasticsearch/ml/anomaly_detection_job/update.go
@@ -0,0 +1,175 @@
+package anomaly_detection_job
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "fmt"
+
+ "github.com/elastic/terraform-provider-elasticstack/internal/diagutil"
+ "github.com/elastic/terraform-provider-elasticstack/internal/utils"
+ "github.com/hashicorp/terraform-plugin-framework/resource"
+ "github.com/hashicorp/terraform-plugin-framework/types/basetypes"
+ "github.com/hashicorp/terraform-plugin-log/tflog"
+)
+
+func (r *anomalyDetectionJobResource) update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) {
+ if !r.resourceReady(&resp.Diagnostics) {
+ return
+ }
+
+ var plan AnomalyDetectionJobTFModel
+ diags := req.Plan.Get(ctx, &plan)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ var state AnomalyDetectionJobTFModel
+ diags = req.State.Get(ctx, &state)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+
+ jobID := state.JobID.ValueString()
+
+ tflog.Debug(ctx, fmt.Sprintf("Updating ML anomaly detection job: %s", jobID))
+
+ // Note: Many ML job properties cannot be updated after creation.
+ // Only certain properties like description, groups, model_plot_config,
+ // analysis_limits.model_memory_limit, renormalization_window_days,
+ // results_retention_days, custom_settings, background_persist_interval,
+ // allow_lazy_open, daily_model_snapshot_retention_after_days,
+ // and model_snapshot_retention_days can be updated.
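+ // The _update API applies partial updates, so fields omitted from the body are left untouched.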
+
+ // Create update body with only updatable fields
+ updateBody := &AnomalyDetectionJobUpdateAPIModel{}
+ hasChanges := false
+
+ if !plan.Description.Equal(state.Description) {
+ updateBody.Description = utils.Pointer(plan.Description.ValueString())
+ hasChanges = true
+ }
+
+ if !plan.Groups.Equal(state.Groups) {
+ var groups []string
+ resp.Diagnostics.Append(plan.Groups.ElementsAs(ctx, &groups, false)...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ updateBody.Groups = groups
+ hasChanges = true
+ }
+
+ if !plan.ModelPlotConfig.Equal(state.ModelPlotConfig) {
+ var modelPlotConfig ModelPlotConfigTFModel
+ resp.Diagnostics.Append(plan.ModelPlotConfig.As(ctx, &modelPlotConfig, basetypes.ObjectAsOptions{})...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ apiModelPlotConfig := &ModelPlotConfigAPIModel{
+ Enabled: modelPlotConfig.Enabled.ValueBool(),
+ AnnotationsEnabled: utils.Pointer(modelPlotConfig.AnnotationsEnabled.ValueBool()),
+ Terms: modelPlotConfig.Terms.ValueString(),
+ }
+ updateBody.ModelPlotConfig = apiModelPlotConfig
+ hasChanges = true
+ }
+
+ if !plan.AnalysisLimits.Equal(state.AnalysisLimits) {
+ var analysisLimits AnalysisLimitsTFModel
+ resp.Diagnostics.Append(plan.AnalysisLimits.As(ctx, &analysisLimits, basetypes.ObjectAsOptions{})...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ apiAnalysisLimits := &AnalysisLimitsAPIModel{
+ ModelMemoryLimit: analysisLimits.ModelMemoryLimit.ValueString(),
+ }
+ if !analysisLimits.CategorizationExamplesLimit.IsNull() {
+ apiAnalysisLimits.CategorizationExamplesLimit = utils.Pointer(analysisLimits.CategorizationExamplesLimit.ValueInt64())
+ }
+ updateBody.AnalysisLimits = apiAnalysisLimits
+ hasChanges = true
+ }
+
+ if !plan.AllowLazyOpen.Equal(state.AllowLazyOpen) {
+ updateBody.AllowLazyOpen = utils.Pointer(plan.AllowLazyOpen.ValueBool())
+ hasChanges = true
+ }
+
+ if !plan.BackgroundPersistInterval.Equal(state.BackgroundPersistInterval) && !plan.BackgroundPersistInterval.IsNull() {
+ updateBody.BackgroundPersistInterval = utils.Pointer(plan.BackgroundPersistInterval.ValueString())
+ hasChanges = true
+ }
+
+ if !plan.CustomSettings.Equal(state.CustomSettings) && !plan.CustomSettings.IsNull() {
+ var customSettings map[string]interface{}
+ if err := json.Unmarshal([]byte(plan.CustomSettings.ValueString()), &customSettings); err != nil {
+ resp.Diagnostics.AddError("Failed to parse custom_settings", err.Error())
+ return
+ }
+ updateBody.CustomSettings = customSettings
+ hasChanges = true
+ }
+
+ if !plan.DailyModelSnapshotRetentionAfterDays.Equal(state.DailyModelSnapshotRetentionAfterDays) && !plan.DailyModelSnapshotRetentionAfterDays.IsNull() {
+ updateBody.DailyModelSnapshotRetentionAfterDays = utils.Pointer(plan.DailyModelSnapshotRetentionAfterDays.ValueInt64())
+ hasChanges = true
+ }
+
+ if !plan.ModelSnapshotRetentionDays.Equal(state.ModelSnapshotRetentionDays) && !plan.ModelSnapshotRetentionDays.IsNull() {
+ updateBody.ModelSnapshotRetentionDays = utils.Pointer(plan.ModelSnapshotRetentionDays.ValueInt64())
+ hasChanges = true
+ }
+
+ if !plan.RenormalizationWindowDays.Equal(state.RenormalizationWindowDays) && !plan.RenormalizationWindowDays.IsNull() {
+ updateBody.RenormalizationWindowDays = utils.Pointer(plan.RenormalizationWindowDays.ValueInt64())
+ hasChanges = true
+ }
+
+ if !plan.ResultsRetentionDays.Equal(state.ResultsRetentionDays) && !plan.ResultsRetentionDays.IsNull() {
+ updateBody.ResultsRetentionDays = utils.Pointer(plan.ResultsRetentionDays.ValueInt64())
+ hasChanges = true
+ }
+
+ // Only proceed with update if there are changes
+ if !hasChanges {
+ tflog.Debug(ctx, fmt.Sprintf("No updates needed for ML anomaly detection job: %s", jobID))
+ resp.Diagnostics.Append(resp.State.Set(ctx, plan)...)
+ return
+ }
+
+ esClient, err := r.client.GetESClient()
+ if err != nil {
+ resp.Diagnostics.AddError("Failed to get Elasticsearch client", err.Error())
+ return
+ }
+
+ // Marshal the update body to JSON
+ body, err := json.Marshal(updateBody)
+ if err != nil {
+ resp.Diagnostics.AddError("Failed to marshal job update", err.Error())
+ return
+ }
+
+ // Update the ML job
+ res, err := esClient.ML.UpdateJob(jobID, bytes.NewReader(body), esClient.ML.UpdateJob.WithContext(ctx))
+ if err != nil {
+ resp.Diagnostics.AddError("Failed to update ML anomaly detection job", err.Error())
+ return
+ }
+ defer res.Body.Close()
+
+ if diags := diagutil.CheckErrorFromFW(res, fmt.Sprintf("Unable to update ML anomaly detection job: %s", jobID)); diags.HasError() {
+ resp.Diagnostics.Append(diags...)
+ return
+ }
+
+ // Read the updated job to get the current state
+ found, diags := r.read(ctx, &plan)
+ resp.Diagnostics.Append(diags...)
+ if resp.Diagnostics.HasError() {
+ return
+ }
+ if !found {
+ resp.State.RemoveResource(ctx)
+ return
+ }
+
+ // Set the updated state
+ resp.Diagnostics.Append(resp.State.Set(ctx, &plan)...)
+
+ tflog.Debug(ctx, fmt.Sprintf("Successfully updated ML anomaly detection job: %s", jobID))
+}
diff --git a/provider/plugin_framework.go b/provider/plugin_framework.go
index bba736241..eb8752835 100644
--- a/provider/plugin_framework.go
+++ b/provider/plugin_framework.go
@@ -10,6 +10,7 @@ import (
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/data_stream_lifecycle"
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/index"
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/index/indices"
+ "github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/ml/anomaly_detection_job"
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/security/api_key"
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/security/role_mapping"
"github.com/elastic/terraform-provider-elasticstack/internal/elasticsearch/security/system_user"
@@ -116,5 +117,6 @@ func (p *Provider) Resources(ctx context.Context) []func() resource.Resource {
maintenance_window.NewResource,
enrich.NewEnrichPolicyResource,
role_mapping.NewRoleMappingResource,
+ anomaly_detection_job.NewAnomalyDetectionJobResource,
}
}