diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml index cbf806aad..47eb58721 100644 --- a/.github/workflows/go.yml +++ b/.github/workflows/go.yml @@ -21,7 +21,8 @@ jobs: - name: Install terraform uses: hashicorp/setup-terraform@v2 with: - terraform_version: "0.14.x" + terraform_version: "1.x.x" + terraform_wrapper: false - name: Cache Go Modules uses: actions/cache@v3 diff --git a/CHANGELOG.md b/CHANGELOG.md index ef0490875..01b82e577 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,9 +1,33 @@ -# 0.6.0 (Unreleased) +# 0.7.0 (Unreleased) FEATURES: * resource/deployment: Utilise the template migration API to build the base update request when changing `deployment_template_id`. This results in more reliable changes between deployment templates. ([#547](https://github.com/elastic/terraform-provider-ec/issues/547)) +# 0.6.0 (Unreleased) + +FEATURES: + +Migration to [TF Plugin Framework](https://developer.hashicorp.com/terraform/plugin/framework) + +BREAKING CHANGES: + +New schema for `ec_deployment` + +BUG FIXES: + +[#336](https://github.com/elastic/terraform-provider-ec/issues/336) +[#467](https://github.com/elastic/terraform-provider-ec/issues/467) +[#445](https://github.com/elastic/terraform-provider-ec/issues/445) + +NOTES + +* The migration is based on 0.4.1, so all changes from 0.5.0 are omitted. + +* State upgrade is not yet implemented for `ec_deployment`. + The recommended way to proceed with existing TF resources is [state import](https://developer.hashicorp.com/terraform/cli/import#state-only). + However, this doesn't import user passwords and secret tokens. + # 0.5.0 (Oct 12, 2022) FEATURES: diff --git a/NOTICE b/NOTICE old mode 100755 new mode 100644 index 89a3c8cff..e8817dccb --- a/NOTICE +++ b/NOTICE @@ -1,85 +1,83 @@ terraform-provider-ec -Copyright 2023 Elasticsearch B.V. +Copyright 2022-2023 Elasticsearch B.V. This product includes software developed at Elasticsearch B.V. and third-party software developed by the licenses listed below. 
========================================================================= -github.com/davecgh/go-spew 0BSD -github.com/agext/levenshtein Apache-2.0 -github.com/apparentlymart/go-textseg/v13 Apache-2.0 -github.com/elastic/cloud-sdk-go Apache-2.0 -github.com/go-logr/logr Apache-2.0 -github.com/go-logr/stdr Apache-2.0 -github.com/go-openapi/analysis Apache-2.0 -github.com/go-openapi/errors Apache-2.0 -github.com/go-openapi/jsonpointer Apache-2.0 -github.com/go-openapi/jsonreference Apache-2.0 -github.com/go-openapi/loads Apache-2.0 -github.com/go-openapi/runtime Apache-2.0 -github.com/go-openapi/spec Apache-2.0 -github.com/go-openapi/strfmt Apache-2.0 -github.com/go-openapi/swag Apache-2.0 -github.com/go-openapi/validate Apache-2.0 -github.com/oklog/run Apache-2.0 -github.com/oklog/ulid Apache-2.0 -github.com/opentracing/opentracing-go Apache-2.0 -go.mongodb.org/mongo-driver Apache-2.0 -go.opentelemetry.io/otel/trace Apache-2.0 -go.opentelemetry.io/otel Apache-2.0 -google.golang.org/appengine Apache-2.0 -google.golang.org/genproto Apache-2.0 -google.golang.org/grpc Apache-2.0 -gopkg.in/yaml.v2 Apache-2.0 -gopkg.in/yaml.v3 Apache-2.0 -github.com/vmihailenco/msgpack/v4 BSD-2-Clause -github.com/vmihailenco/msgpack BSD-2-Clause -github.com/vmihailenco/tagparser BSD-2-Clause -github.com/puerkitobio/purell BSD-3-Clause -github.com/puerkitobio/urlesc BSD-3-Clause -github.com/golang/protobuf BSD-3-Clause -github.com/google/go-cmp BSD-3-Clause -github.com/pmezard/go-difflib BSD-3-Clause -golang.org/x/crypto BSD-3-Clause -golang.org/x/net BSD-3-Clause -golang.org/x/sys BSD-3-Clause -golang.org/x/text BSD-3-Clause -google.golang.org/protobuf BSD-3-Clause -github.com/apparentlymart/go-cidr MIT -github.com/asaskevich/govalidator MIT -github.com/blang/semver/v4 MIT -github.com/fatih/color MIT -github.com/hashicorp/go-cty MIT -github.com/hashicorp/go-hclog MIT -github.com/josharian/intern MIT -github.com/mailru/easyjson MIT -github.com/mattn/go-colorable MIT 
-github.com/mattn/go-isatty MIT -github.com/mitchellh/copystructure MIT -github.com/mitchellh/go-testing-interface MIT -github.com/mitchellh/go-wordwrap MIT -github.com/mitchellh/mapstructure MIT -github.com/mitchellh/reflectwalk MIT -github.com/stretchr/testify MIT -github.com/zclconf/go-cty MIT -github.com/hashicorp/go-checkpoint MPL-2.0 -github.com/hashicorp/go-cleanhttp MPL-2.0 -github.com/hashicorp/go-multierror MPL-2.0 -github.com/hashicorp/hc-install MPL-2.0 -github.com/hashicorp/hcl/v2 MPL-2.0 -github.com/hashicorp/logutils MPL-2.0 -github.com/hashicorp/terraform-json MPL-2.0 -github.com/hashicorp/terraform-plugin-log MPL-2.0 -github.com/hashicorp/terraform-plugin-sdk/v2 MPL-2.0 -github.com/hashicorp/yamux MPL-2.0 -github.com/hashicorp/errwrap MPL-2.0-no-copyleft-exception -github.com/hashicorp/go-plugin MPL-2.0-no-copyleft-exception -github.com/hashicorp/go-uuid MPL-2.0-no-copyleft-exception -github.com/hashicorp/go-version MPL-2.0-no-copyleft-exception -github.com/hashicorp/terraform-exec MPL-2.0-no-copyleft-exception -github.com/hashicorp/terraform-plugin-go MPL-2.0-no-copyleft-exception -github.com/hashicorp/terraform-registry-address MPL-2.0-no-copyleft-exception -github.com/hashicorp/terraform-svchost MPL-2.0-no-copyleft-exception +github.com/davecgh/go-spew 0BSD +github.com/agext/levenshtein Apache-2.0 +github.com/apparentlymart/go-textseg/v13 Apache-2.0 +github.com/elastic/cloud-sdk-go Apache-2.0 +github.com/go-openapi/analysis Apache-2.0 +github.com/go-openapi/errors Apache-2.0 +github.com/go-openapi/jsonpointer Apache-2.0 +github.com/go-openapi/jsonreference Apache-2.0 +github.com/go-openapi/loads Apache-2.0 +github.com/go-openapi/runtime Apache-2.0 +github.com/go-openapi/spec Apache-2.0 +github.com/go-openapi/strfmt Apache-2.0 +github.com/go-openapi/swag Apache-2.0 +github.com/go-openapi/validate Apache-2.0 +github.com/oklog/run Apache-2.0 +github.com/oklog/ulid Apache-2.0 +github.com/opentracing/opentracing-go Apache-2.0 
+go.mongodb.org/mongo-driver Apache-2.0 +google.golang.org/appengine Apache-2.0 +google.golang.org/genproto Apache-2.0 +google.golang.org/grpc Apache-2.0 +gopkg.in/yaml.v2 Apache-2.0 +gopkg.in/yaml.v3 Apache-2.0 +github.com/vmihailenco/msgpack/v4 BSD-2-Clause +github.com/vmihailenco/msgpack BSD-2-Clause +github.com/vmihailenco/tagparser BSD-2-Clause +github.com/golang/protobuf BSD-3-Clause +github.com/google/go-cmp BSD-3-Clause +github.com/pmezard/go-difflib BSD-3-Clause +golang.org/x/crypto BSD-3-Clause +golang.org/x/exp BSD-3-Clause +golang.org/x/net BSD-3-Clause +golang.org/x/sys BSD-3-Clause +golang.org/x/text BSD-3-Clause +google.golang.org/protobuf BSD-3-Clause +github.com/apparentlymart/go-cidr MIT +github.com/asaskevich/govalidator MIT +github.com/blang/semver/v4 MIT +github.com/blang/semver MIT +github.com/fatih/color MIT +github.com/hashicorp/go-cty MIT +github.com/hashicorp/go-hclog MIT +github.com/josharian/intern MIT +github.com/mailru/easyjson MIT +github.com/mattn/go-colorable MIT +github.com/mattn/go-isatty MIT +github.com/mitchellh/copystructure MIT +github.com/mitchellh/go-testing-interface MIT +github.com/mitchellh/go-wordwrap MIT +github.com/mitchellh/mapstructure MIT +github.com/mitchellh/reflectwalk MIT +github.com/stretchr/testify MIT +github.com/zclconf/go-cty MIT +github.com/hashicorp/errwrap MPL-2.0 +github.com/hashicorp/go-checkpoint MPL-2.0 +github.com/hashicorp/go-plugin MPL-2.0 +github.com/hashicorp/go-uuid MPL-2.0 +github.com/hashicorp/hc-install MPL-2.0 +github.com/hashicorp/terraform-exec MPL-2.0 +github.com/hashicorp/terraform-json MPL-2.0 +github.com/hashicorp/terraform-plugin-framework-validators MPL-2.0 +github.com/hashicorp/terraform-plugin-sdk/v2 MPL-2.0 +github.com/hashicorp/terraform-svchost MPL-2.0 +github.com/hashicorp/go-cleanhttp MPL-2.0-no-copyleft-exception +github.com/hashicorp/go-multierror MPL-2.0-no-copyleft-exception +github.com/hashicorp/go-version MPL-2.0-no-copyleft-exception +github.com/hashicorp/hcl/v2 
MPL-2.0-no-copyleft-exception +github.com/hashicorp/logutils MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-plugin-framework MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-plugin-go MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-plugin-log MPL-2.0-no-copyleft-exception +github.com/hashicorp/terraform-registry-address MPL-2.0-no-copyleft-exception +github.com/hashicorp/yamux MPL-2.0-no-copyleft-exception ========================================================================= diff --git a/README.md b/README.md index 7c19b2c77..16a0eb2e6 100644 --- a/README.md +++ b/README.md @@ -72,9 +72,15 @@ resource "ec_deployment" "example_minimal" { deployment_template_id = "aws-io-optimized-v2" # Use the deployment template defaults - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = { + topology = {} + } } ``` @@ -114,3 +120,123 @@ $ export EC_API_KEY="" ``` After doing so, you can navigate to any of our examples in `./examples` and try one. + +### Moving to TF Framework and schema change for `ec_deployment` resource. + +v0.6.0 contains migration to [TF Plugin Framework](https://developer.hashicorp.com/terraform/plugin/framework) and introduces new schema for `ec_deployment` resource: + +- switching to attributes syntax instead of blocks for almost all definitions that used to be blocks. It means that, for example, a definition like `elasticsearch {...}` has to be changed to `elasticsearch = {...}`, e.g. + +```hcl +resource "ec_deployment" "defaults" { + name = "example" + region = "us-east-1" + version = data.ec_stack.latest.version + deployment_template_id = "aws-io-optimized-v2" + + elasticsearch = { + hot = { + autoscaling = {} + } + } + + kibana = { + topology = {} + } + + enterprise_search = { + zone_count = 1 + } +} +``` + +- `topology` attribute of `elasticsearch` is replaced with a number of dedicated attributes, one per tier, e.g. 
 + +``` + elasticsearch { + topology { + id = "hot_content" + size = "1g" + autoscaling { + max_size = "8g" + } + } + topology { + id = "warm" + size = "2g" + autoscaling { + max_size = "15g" + } + } + } +``` + +has to be converted to + +``` + elasticsearch = { + hot = { + size = "1g" + autoscaling = { + max_size = "8g" + } + } + + warm = { + size = "2g" + autoscaling = { + max_size = "15g" + } + } + } + +``` + +- due to some existing limitations of TF, nested attributes that are nested inside other nested attributes cannot be `Computed`. It means that all such attributes have to be mentioned in configurations even if they are empty. E.g., a definition of `elasticsearch` has to include all topology elements (tiers) that have non-zero size or can be scaled up (if autoscaling is enabled) in the corresponding template. For example, the simplest definition of `elasticsearch` for `aws-io-optimized-v2` template is + +```hcl +resource "ec_deployment" "defaults" { + name = "example" + region = "us-east-1" + version = data.ec_stack.latest.version + deployment_template_id = "aws-io-optimized-v2" + + elasticsearch = { + hot = { + autoscaling = {} + } + } +} +``` + +Please note that the snippet explicitly mentions `hot` tier with `autoscaling` attribute even despite the fact that they are empty. + +- a lot of attributes that used to be collections (e.g. lists and sets) are converted to singletons, e.g. `elasticsearch`, `apm`, `kibana`, `enterprise_search`, `observability`, `topology`, `autoscaling`, etc. Please note that, generally, users are not expected to make any change to their existing configuration to address this particular change (besides moving from block to attribute syntax). All these components used to exist in single instances, so the change is mostly syntactical, taking into account the switch to attributes instead of blocks (otherwise if we kept list for configs, `config {}` had to be rewritten in `config = [{}]` with the move to the attribute syntax). 
However this change is a breaking one from the schema perspective and requires state upgrade for existing resources that is performed by TF (by calling the provider's API). + +- [`strategy` attribute](https://registry.terraform.io/providers/elastic/ec/latest/docs/resources/ec_deployment#strategy) is converted to string with the same set of values that was used for its `type` attribute previously; + +- switching to TF protocol 6. From user perspective it should not require any change in their existing configurations. + +#### Migration guide. + +The schema modifications mean that a current TF state cannot work as is with the provider version 0.6.0 and higher. + +There are 2 ways to tackle this: + +- import existing resource using deployment ID, e.g. `terraform import 'ec_deployment.test' ` +- state upgrade that is performed by TF by calling the provider's API so no action is required from user perspective + +Currently the state upgrade functionality is still in development so importing existing resources is the recommended way to deal with existing TF states. +Please mind the fact that state import doesn't import user passwords and secret tokens that can be the case if your TF modules make use of them. +State upgrade doesn't have this limitation. + +#### Known issues. + +For the migrated version (0.6.0 or higher), `terraform plan` output can contain more changes compared to the older versions of the provider (that use TF SDK). +This happens because TF Framework treats all `computed` attributes as `unknown` (known after apply) once configuration changes. +`ec_deployment` schema contains quite a few of such attributes, so `terraform plan`'s output can be quite big for the resource due to the mentioned reason. +However, it doesn't mean that all attributes that are marked as `unknown` in the plan will get new values after apply. +To mitigate the problem, the provider uses plan modifiers, which are a recommended way to reduce plan output. 
+However, currently plan modifiers don't cover all the `computed` attributes. + +Please make sure to update to the latest TF client version. diff --git a/build/Makefile.deps b/build/Makefile.deps index ac6edfdbc..728cee016 100644 --- a/build/Makefile.deps +++ b/build/Makefile.deps @@ -5,7 +5,7 @@ ARCH_GORELEASER:=$(shell $(PWD)/scripts/uname_arch_goreleaser.sh) VERSION_DIR:=$(GOBIN)/versions VERSION_GOLICENSER:=v0.3.0 -VERSION_GOLANGCILINT:=v1.49.0 +VERSION_GOLANGCILINT:=v1.50.0 VERSION_GORELEASER:=v1.2.5 VERSION_GOCHANGELOG:=v0.0.0-20201005170154-56335215ce3a VERSION_VERSIONBUMP:=v1.1.0 diff --git a/build/Makefile.test b/build/Makefile.test index 1622402c9..bf7c0f02e 100644 --- a/build/Makefile.test +++ b/build/Makefile.test @@ -3,7 +3,7 @@ SWEEP_DIR ?= $(TEST_ACC) SWEEP_CI_RUN_FILTER ?= ec_deployments TEST ?= ./... TEST_COUNT ?= 1 -TESTUNITARGS ?= -timeout 10s -p 4 -race -cover -coverprofile=reports/c.out +TESTUNITARGS ?= -timeout 10m -race -cover -coverprofile=reports/c.out TEST_ACC ?= github.com/elastic/terraform-provider-ec/ec/acc TEST_NAME ?= TestAcc TEST_ACC_PARALLEL = 6 @@ -26,7 +26,7 @@ unit: _report_path tests: unit .PHONY: testacc -## Runs the Terraform acceptance tests. Use TEST_NAME, TESTARGS, TEST_COUNT and TEST_ACC_PARALLEL to control execution. +## Runs the Terraform acceptance tests. Use TEST_NAME, TESTARGS, TEST_COUNT to control execution. testacc: @ echo "-> Running terraform acceptance tests..." @ TF_ACC=1 go test $(TEST_ACC) -v -count $(TEST_COUNT) -parallel $(TEST_ACC_PARALLEL) $(TESTARGS) -timeout 120m -run $(TEST_NAME) diff --git a/docs/data-sources/ec_stack.md b/docs/data-sources/ec_stack.md index 076e4cadf..480f585e1 100644 --- a/docs/data-sources/ec_stack.md +++ b/docs/data-sources/ec_stack.md @@ -4,7 +4,7 @@ description: |- Retrieves information about an Elastic Cloud stack. --- -# Data Source: ec_deployment +# Data Source: ec_stack Use this data source to retrieve information about an existing Elastic Cloud stack. 
diff --git a/docs/guides/configuring-sso-ec-deployment.md b/docs/guides/configuring-sso-ec-deployment.md index 13be068df..5b7e69d17 100644 --- a/docs/guides/configuring-sso-ec-deployment.md +++ b/docs/guides/configuring-sso-ec-deployment.md @@ -31,27 +31,27 @@ resource "ec_deployment" "elastic-sso" { version = "7.17.5" deployment_template_id = "aws-compute-optimized-v3" - elasticsearch { - topology { - id = "hot_content" + elasticsearch = { + hot = { size = "8g" zone_count = 2 } - topology { - id = "warm" + warm = { size = "8g" zone_count = 2 } - config { + config = { # The URL domain suffix that is used in this example is often different for other Elasticsearch Service regions. Please check the appropriate domain suffix for your used region. user_settings_yaml = templatefile("./es.yml", { kibana_url = format("https://%s-%s.kb.us-east-1.aws.found.io:9243", var.name, substr("${random_uuid.uuid.result}", 0, 6)) }) } } - kibana { - config { + kibana = { + topology = {} + + config = { user_settings_yaml = file("./kb.yml") } } @@ -71,7 +71,7 @@ You will configure the deployment alias field to be the same, so if the deployme Then, by using a variable in the `es.yml` file and a terraform templating mechanism, you can generate your own `es.yml` file. 
Your variable is named kibana_url, as seen in the ec_deployment resource: ```hcl -config { +config = { user_settings_yaml = templatefile("./es.yml", { kibana_url = format("https://%s-%s.kb.us-east-1.aws.found.io:9243", var.name, substr("${random_uuid.uuid.result}", 0, 6)) }) } ``` diff --git a/docs/resources/ec_deployment.md b/docs/resources/ec_deployment.md index d7fad1283..ef2a9e377 100644 --- a/docs/resources/ec_deployment.md +++ b/docs/resources/ec_deployment.md @@ -35,13 +35,17 @@ resource "ec_deployment" "example_minimal" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} - integrations_server {} + integrations_server = {} - enterprise_search {} + enterprise_search = {} } ``` @@ -58,56 +62,54 @@ resource "ec_deployment" "example_minimal" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { + elasticsearch = { autoscale = "true" # If `autoscale` is set, all topology elements that # - either set `size` in the plan or # - have non-zero default `max_size` (that is read from the deployment templates's `autoscaling_max` value) - # have to be listed in alphabetical order of their `id` fields, - # even if their blocks don't specify other fields beside `id` - topology { - id = "cold" + # have to be listed even if their blocks don't specify other fields beside `id` + + cold = { + autoscaling = {} } - topology { - id = "frozen" + frozen = { + autoscaling = {} } - topology { - id = "hot_content" + hot = { size = "8g" - autoscaling { + autoscaling = { max_size = "128g" max_size_resource = "memory" } } - topology { - id = "ml" + ml = { + autoscaling = {} } - topology { - id = "warm" + warm = { + autoscaling = {} } - } # Initial size for `hot_content` tier is set to 8g # so `hot_content`'s size has to be added to the `ignore_changes` meta-argument to ignore future modifications that 
can be made by the autoscaler lifecycle { ignore_changes = [ - elasticsearch[0].topology[2].size + elasticsearch.hot.size ] } - kibana {} + kibana = {} - integrations_server {} + integrations_server = {} - enterprise_search {} + enterprise_search = {} } ``` @@ -128,12 +130,16 @@ resource "ec_deployment" "example_observability" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} # Optional observability settings - observability { + observability = { deployment_id = ec_deployment.example_minimal.id } } @@ -141,7 +147,7 @@ It is possible to enable observability without using a second deployment, by storing the observability data in the current deployment. To enable this, set `deployment_id` to `self`. ```hcl -observability { +observability = { deployment_id = "self" } ``` @@ -161,10 +167,10 @@ resource "ec_deployment" "source_deployment" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + hot = { + size = "1g" + autoscaling = {} } } } @@ -176,15 +182,18 @@ resource "ec_deployment" "ccs" { version = data.ec_stack.latest.version deployment_template_id = "aws-cross-cluster-search-v2" - elasticsearch { - remote_cluster { + elasticsearch = { + hot = { + autoscaling = {} + } + remote_cluster = [{ deployment_id = ec_deployment.source_deployment.id alias = ec_deployment.source_deployment.name ref_id = ec_deployment.source_deployment.elasticsearch.0.ref_id - } + }] } - kibana {} + kibana = {} } ``` @@ -205,7 +214,11 @@ resource "ec_deployment" "with_tags" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } tags = { owner = "elastic cloud" @@ -231,10 +244,13 @@ 
resource "ec_deployment" "with_tags" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { - strategy { - type = "rolling_all" + elasticsearch = { + hot = { + autoscaling = {} } + strategy = [{ + type = "rolling_all" + }] } tags = { @@ -405,11 +421,6 @@ The optional `kibana` block supports the following arguments: * `elasticsearch_cluster_ref_id` - (Optional) This field references the `ref_id` of the deployment Elasticsearch cluster. The default value `main-elasticsearch` is recommended. * `ref_id` - (Optional) Can be set on the Kibana resource. The default value `main-kibana` is recommended. * `config` (Optional) Kibana settings applied to all topologies unless overridden in the `topology` element. - -##### Topology - -The optional `kibana.topology` block supports the following arguments: - * `instance_configuration_id` - (Optional) Default instance configuration of the deployment template. No need to change this value since Kibana has only one _instance type_. * `size` - (Optional) Amount of memory (RAM) per topology element in the "g" notation. When omitted, it defaults to the deployment template value. * `size_resource` - (Optional) Type of resource to which the size is assigned. Defaults to `"memory"`. @@ -432,11 +443,6 @@ The optional `integrations_server` block supports the following arguments: * `elasticsearch_cluster_ref_id` - (Optional) This field references the `ref_id` of the deployment Elasticsearch cluster. The default value `main-elasticsearch` is recommended. * `ref_id` - (Optional) Can be set on the Integrations Server resource. The default value `main-integrations_server` is recommended. * `config` (Optional) Integrations Server settings applied to all topologies unless overridden in the `topology` element. 
- -##### Topology - -The optional `integrations_server.topology` block supports the following arguments: - * `instance_configuration_id` - (Optional) Default instance configuration of the deployment template. No need to change this value since Integrations Server has only one _instance type_. * `size` - (Optional) Amount of memory (RAM) per topology element in the "g" notation. When omitted, it defaults to the deployment template value. * `size_resource` - (Optional) Type of resource to which the size is assigned. Defaults to `"memory"`. @@ -456,11 +462,6 @@ The optional `apm` block supports the following arguments: * `elasticsearch_cluster_ref_id` - (Optional) This field references the `ref_id` of the deployment Elasticsearch cluster. The default value `main-elasticsearch` is recommended. * `ref_id` - (Optional) Can be set on the APM resource. The default value `main-apm` is recommended. * `config` (Optional) APM settings applied to all topologies unless overridden in the `topology` element. - -##### Topology - -The optional `apm.topology` block supports the following arguments: - * `instance_configuration_id` - (Optional) Default instance configuration of the deployment template. No need to change this value since APM has only one _instance type_. * `size` - (Optional) Amount of memory (RAM) per topology element in the "g" notation. When omitted, it defaults to the deployment template value. * `size_resource` - (Optional) Type of resource to which the size is assigned. Defaults to `"memory"`. @@ -484,11 +485,6 @@ The optional `enterprise_search` block supports the following arguments: * `elasticsearch_cluster_ref_id` - (Optional) This field references the `ref_id` of the deployment Elasticsearch cluster. The default value `main-elasticsearch` is recommended. * `ref_id` - (Optional) Can be set on the Enterprise Search resource. The default value `main-enterprise_search` is recommended. 
* `config` (Optional) Enterprise Search settings applied to all topologies unless overridden in the `topology` element. - -##### Topology - -The optional `enterprise_search.topology` block supports the following settings: - * `instance_configuration_id` - (Optional) Default instance configuration of the deployment template. To change it, use the [full list](https://www.elastic.co/guide/en/cloud/current/ec-regions-templates-instances.html) of regions and deployment templates available in ESS. * `size` - (Optional) Amount of memory (RAM) per `topology` element in the "g" notation. When omitted, it defaults to the deployment template value. * `size_resource` - (Optional) Type of resource to which the size is assigned. Defaults to `"memory"`. diff --git a/docs/resources/ec_deployment_elasticsearch_keystore.md b/docs/resources/ec_deployment_elasticsearch_keystore.md index 0091f8ae8..6a5b1677e 100644 --- a/docs/resources/ec_deployment_elasticsearch_keystore.md +++ b/docs/resources/ec_deployment_elasticsearch_keystore.md @@ -63,7 +63,11 @@ resource "ec_deployment" "example_keystore" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } } # Create the keystore secret entry diff --git a/docs/resources/ec_deployment_traffic_filter.md b/docs/resources/ec_deployment_traffic_filter.md index 4ad185629..565f4876a 100644 --- a/docs/resources/ec_deployment_traffic_filter.md +++ b/docs/resources/ec_deployment_traffic_filter.md @@ -33,9 +33,13 @@ resource "ec_deployment" "example_minimal" { ] # Use the deployment template defaults - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} } resource "ec_deployment_traffic_filter" "example" { @@ -76,9 +80,13 @@ resource "ec_deployment" "example_minimal" { ] # Use the deployment template defaults - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + 
kibana = {} } resource "ec_deployment_traffic_filter" "azure" { @@ -121,9 +129,13 @@ resource "ec_deployment" "example_minimal" { ] # Use the deployment template defaults - elasticsearch {} + elasticsearch = { + hot = { + autoscaling = {} + } + } - kibana {} + kibana = {} } resource "ec_deployment_traffic_filter" "gcp_psc" { diff --git a/ec/acc/acc_prereq.go b/ec/acc/acc_prereq.go index f481276ae..91f00f650 100644 --- a/ec/acc/acc_prereq.go +++ b/ec/acc/acc_prereq.go @@ -22,22 +22,25 @@ import ( "os" "testing" + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" + "github.com/elastic/terraform-provider-ec/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) const ( prefix = "terraform_acc_" ) -var testAccProviderFactory = map[string]func() (*schema.Provider, error){ - "ec": providerFactory, -} +var testAccProviderFactory = protoV6ProviderFactories() -func providerFactory() (*schema.Provider, error) { - return ec.Provider(), nil +func protoV6ProviderFactories() map[string]func() (tfprotov6.ProviderServer, error) { + return map[string]func() (tfprotov6.ProviderServer, error){ + "ec": providerserver.NewProtocol6WithError(ec.New("acc-tests")), + } } func testAccPreCheck(t *testing.T) { diff --git a/ec/acc/datasource_deployment_basic_test.go b/ec/acc/datasource_deployment_basic_test.go index 17d1c149b..e33ae87eb 100644 --- a/ec/acc/datasource_deployment_basic_test.go +++ b/ec/acc/datasource_deployment_basic_test.go @@ -38,9 +38,9 @@ func TestAccDatasourceDeployment_basic(t *testing.T) { var namePrefix = secondRandomName[:22] resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + 
CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, @@ -52,55 +52,55 @@ func TestAccDatasourceDeployment_basic(t *testing.T) { resource.TestCheckResourceAttrPair(datasourceName, "traffic_filter.#", resourceName, "traffic_filter.#"), // Elasticsearch - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.ref_id", resourceName, "elasticsearch.0.ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.cloud_id", resourceName, "elasticsearch.0.cloud_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.resource_id", resourceName, "elasticsearch.0.resource_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.http_endpoint_id", resourceName, "elasticsearch.0.http_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.https_endpoint_id", resourceName, "elasticsearch.0.https_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.instance_configuration_id", resourceName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size", resourceName, "elasticsearch.0.topology.0.size"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size_resource", resourceName, "elasticsearch.0.topology.0.size_resource"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.zone_count", resourceName, "elasticsearch.0.topology.0.zone_count"), - resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.*.node_roles.*", resourceName, "elasticsearch.0.topology.*.node_roles.*"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.ref_id", resourceName, "elasticsearch.ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.cloud_id", resourceName, "elasticsearch.cloud_id"), + 
resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.resource_id", resourceName, "elasticsearch.resource_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.http_endpoint_id", resourceName, "elasticsearch.http_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.https_endpoint_id", resourceName, "elasticsearch.https_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.instance_configuration_id", resourceName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size", resourceName, "elasticsearch.topology.hot_content.size"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.size_resource", resourceName, "elasticsearch.topology.hot_content.size_resource"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.zone_count", resourceName, "elasticsearch.topology.hot_content.zone_count"), + resource.TestCheckResourceAttrPair(datasourceName, "elasticsearch.0.topology.0.node_roles.*", resourceName, "elasticsearch.topology.hot_content.node_roles.*"), // Kibana - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.elasticsearch_cluster_ref_id", resourceName, "kibana.0.elasticsearch_cluster_ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.ref_id", resourceName, "kibana.0.ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.cloud_id", resourceName, "kibana.0.cloud_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.resource_id", resourceName, "kibana.0.resource_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.http_endpoint_id", resourceName, "kibana.0.http_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.https_endpoint_id", resourceName, "kibana.0.https_endpoint_id"), - 
resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.instance_configuration_id", resourceName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.size", resourceName, "kibana.0.topology.0.size"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.size_resource", resourceName, "kibana.0.topology.0.size_resource"), - resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.zone_count", resourceName, "kibana.0.topology.0.zone_count"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.elasticsearch_cluster_ref_id", resourceName, "kibana.elasticsearch_cluster_ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.ref_id", resourceName, "kibana.ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.cloud_id", resourceName, "kibana.cloud_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.resource_id", resourceName, "kibana.resource_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.http_endpoint_id", resourceName, "kibana.http_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.https_endpoint_id", resourceName, "kibana.https_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.instance_configuration_id", resourceName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.size", resourceName, "kibana.size"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.size_resource", resourceName, "kibana.size_resource"), + resource.TestCheckResourceAttrPair(datasourceName, "kibana.0.topology.0.zone_count", resourceName, "kibana.zone_count"), // APM - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.elasticsearch_cluster_ref_id", resourceName, "apm.0.elasticsearch_cluster_ref_id"), - 
resource.TestCheckResourceAttrPair(datasourceName, "apm.0.ref_id", resourceName, "apm.0.ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.cloud_id", resourceName, "apm.0.cloud_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.resource_id", resourceName, "apm.0.resource_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.http_endpoint_id", resourceName, "apm.0.http_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.https_endpoint_id", resourceName, "apm.0.https_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.instance_configuration_id", resourceName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.size", resourceName, "apm.0.topology.0.size"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.size_resource", resourceName, "apm.0.topology.0.size_resource"), - resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.zone_count", resourceName, "apm.0.topology.0.zone_count"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.elasticsearch_cluster_ref_id", resourceName, "apm.elasticsearch_cluster_ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.ref_id", resourceName, "apm.ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.cloud_id", resourceName, "apm.cloud_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.resource_id", resourceName, "apm.resource_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.http_endpoint_id", resourceName, "apm.http_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.https_endpoint_id", resourceName, "apm.https_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.instance_configuration_id", resourceName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttrPair(datasourceName, 
"apm.0.topology.0.size", resourceName, "apm.size"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.size_resource", resourceName, "apm.size_resource"), + resource.TestCheckResourceAttrPair(datasourceName, "apm.0.topology.0.zone_count", resourceName, "apm.zone_count"), // Enterprise Search - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.elasticsearch_cluster_ref_id", resourceName, "enterprise_search.0.elasticsearch_cluster_ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.ref_id", resourceName, "enterprise_search.0.ref_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.cloud_id", resourceName, "enterprise_search.0.cloud_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.resource_id", resourceName, "enterprise_search.0.resource_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.http_endpoint_id", resourceName, "enterprise_search.0.http_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.https_endpoint_id", resourceName, "enterprise_search.0.https_endpoint_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.instance_configuration_id", resourceName, "enterprise_search.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.size", resourceName, "enterprise_search.0.topology.0.size"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.size_resource", resourceName, "enterprise_search.0.topology.0.size_resource"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.zone_count", resourceName, "enterprise_search.0.topology.0.zone_count"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_appserver", resourceName, 
"enterprise_search.0.topology.0.node_type_appserver"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_connector", resourceName, "enterprise_search.0.topology.0.node_type_connector"), - resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_worker", resourceName, "enterprise_search.0.topology.0.node_type_worker"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.elasticsearch_cluster_ref_id", resourceName, "enterprise_search.elasticsearch_cluster_ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.ref_id", resourceName, "enterprise_search.ref_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.cloud_id", resourceName, "enterprise_search.cloud_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.resource_id", resourceName, "enterprise_search.resource_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.http_endpoint_id", resourceName, "enterprise_search.http_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.https_endpoint_id", resourceName, "enterprise_search.https_endpoint_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.instance_configuration_id", resourceName, "enterprise_search.instance_configuration_id"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.size", resourceName, "enterprise_search.size"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.size_resource", resourceName, "enterprise_search.size_resource"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.zone_count", resourceName, "enterprise_search.zone_count"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_appserver", resourceName, 
"enterprise_search.node_type_appserver"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_connector", resourceName, "enterprise_search.node_type_connector"), + resource.TestCheckResourceAttrPair(datasourceName, "enterprise_search.0.topology.0.node_type_worker", resourceName, "enterprise_search.node_type_worker"), ), }, { @@ -114,16 +114,16 @@ func TestAccDatasourceDeployment_basic(t *testing.T) { resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.alias", resourceName, "alias"), // Query results - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.elasticsearch_resource_id", resourceName, "elasticsearch.0.resource_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.kibana_resource_id", resourceName, "kibana.0.resource_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.apm_resource_id", resourceName, "apm.0.resource_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.enterprise_search_resource_id", resourceName, "enterprise_search.0.resource_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.elasticsearch_resource_id", resourceName, "elasticsearch.resource_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.kibana_resource_id", resourceName, "kibana.resource_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.apm_resource_id", resourceName, "apm.resource_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.enterprise_search_resource_id", resourceName, "enterprise_search.resource_id"), // Ref ID check. 
- resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.elasticsearch_ref_id", resourceName, "elasticsearch.0.ref_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.kibana_ref_id", resourceName, "kibana.0.ref_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.apm_ref_id", resourceName, "apm.0.ref_id"), - resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.enterprise_search_ref_id", resourceName, "enterprise_search.0.ref_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.elasticsearch_ref_id", resourceName, "elasticsearch.ref_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.kibana_ref_id", resourceName, "kibana.ref_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.apm_ref_id", resourceName, "apm.ref_id"), + resource.TestCheckResourceAttrPair(depsDatasourceName, "deployments.0.enterprise_search_ref_id", resourceName, "enterprise_search.ref_id"), ), }, }, diff --git a/ec/acc/datasource_stack_test.go b/ec/acc/datasource_stack_test.go index 69f204b5e..382f3d759 100644 --- a/ec/acc/datasource_stack_test.go +++ b/ec/acc/datasource_stack_test.go @@ -31,8 +31,8 @@ func TestAccDatasourceStack_latest(t *testing.T) { cfg := fixtureAccStackDataSource(t, depCfg, getRegion()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, Steps: []resource.TestStep{ { Config: cfg, @@ -53,14 +53,14 @@ func TestAccDatasourceStack_regex(t *testing.T) { cfg := fixtureAccStackDataSource(t, depCfg, getRegion()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, Steps: 
[]resource.TestStep{ { Config: cfg, PreventDiskCleanup: true, Check: checkDataSourceStack(datasourceName, - resource.TestCheckResourceAttr(datasourceName, "version_regex", "7.0.?"), + resource.TestCheckResourceAttr(datasourceName, "version_regex", "8.4.?"), resource.TestCheckResourceAttr(datasourceName, "region", getRegion()), ), }, diff --git a/ec/acc/datasource_tags_test.go b/ec/acc/datasource_tags_test.go index ca5417c42..ef22f64fc 100644 --- a/ec/acc/datasource_tags_test.go +++ b/ec/acc/datasource_tags_test.go @@ -43,9 +43,9 @@ func TestAccDatasource_basic_tags(t *testing.T) { cfg := fixtureAccTagsDataSource(t, depCfg, randomName, getRegion(), defaultTemplate, testID) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, diff --git a/ec/acc/deployment_autoscaling_test.go b/ec/acc/deployment_autoscaling_test.go index c92810dec..3ae42c578 100644 --- a/ec/acc/deployment_autoscaling_test.go +++ b/ec/acc/deployment_autoscaling_test.go @@ -37,88 +37,67 @@ func TestAccDeployment_autoscaling(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfgF(startCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.autoscale", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "5"), - 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.autoscale", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "cold"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "0g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.autoscaling.0.max_size", "58g"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.cold.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.size", "0g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.autoscaling.max_size", "58g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "frozen"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "0g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.autoscaling.0.max_size", "120g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.frozen.size", "0g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.frozen.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, 
"elasticsearch.topology.frozen.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.frozen.autoscaling.max_size", "120g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.autoscaling.0.max_size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.autoscaling.max_size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.id", "ml"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.autoscaling.0.max_size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.autoscaling.0.min_size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.ml.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.ml.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, 
"elasticsearch.topology.ml.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.ml.autoscaling.max_size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.ml.autoscaling.min_size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.4.id", "warm"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.4.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.4.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.4.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.4.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.4.autoscaling.0.max_size", "15g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.autoscaling.max_size", "15g"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, // also disables ML { Config: cfgF(disableAutoscale), - // When disabling a tier the plan will be non empty on refresh - // since the topology block is present with size = "0g". 
- ExpectNonEmptyPlan: true, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.autoscale", "false"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "2"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.autoscale", "false"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.autoscaling.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.autoscaling.0.max_size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.autoscaling.max_size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "warm"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.autoscaling.#", "1"), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.autoscaling.0.max_size", "15g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.autoscaling.max_size", "15g"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_basic_defaults_test.go b/ec/acc/deployment_basic_defaults_test.go index 1bb66014b..c93f27666 100644 --- a/ec/acc/deployment_basic_defaults_test.go +++ b/ec/acc/deployment_basic_defaults_test.go @@ -31,7 +31,7 @@ import ( // * Resource defaults. // * Resource declaration in the {} format. ("apm {}"). // * Topology field overrides over field defaults. 
-func TestAccDeployment_basic_defaults(t *testing.T) { +func TestAccDeployment_basic_defaults_first(t *testing.T) { resName := "ec_deployment.defaults" randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) startCfg := "testdata/deployment_basic_defaults_1.tf" @@ -42,40 +42,29 @@ func TestAccDeployment_basic_defaults(t *testing.T) { thirdConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, thirdCfg, randomName, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, // Checks the defaults which are populated using a mix of // Deployment Template and schema defaults. Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, 
"elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckNoResourceAttr(resName, "apm"), + 
resource.TestCheckResourceAttrSet(resName, "enterprise_search.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size", "2g"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "enterprise_search.zone_count", "1"), ), }, { @@ -83,69 +72,45 @@ func TestAccDeployment_basic_defaults(t *testing.T) { Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( // changed - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, 
"kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.size", "2g"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + 
resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "enterprise_search.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size", "2g"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "enterprise_search.zone_count", "1"), ), }, { // Remove all resources except Elasticsearch and Kibana and set a node type override Config: thirdConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, 
"kibana.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), // In this test we're verifying that the topology for Kibana is not reset. // This is due to the terraform SDK stickyness where a removed computed block // with a previous value is the same as an empty block, so previous computed // values are used. - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "2g"), - - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttr(resName, "kibana.size", "2g"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, @@ -161,29 +126,22 @@ func TestAccDeployment_basic_defaults_hw(t *testing.T) { hotWarmCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), hotWarmTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: 
testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, // Create a deployment which only uses Elasticsearch resources Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + 
resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { @@ -191,37 +149,23 @@ func TestAccDeployment_basic_defaults_hw(t *testing.T) { // hot warm, use defaults. Config: hotWarmCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "2"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.instance_configuration_id"), // Hot Warm defaults to 4g. - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_data", ""), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "warm"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + 
resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_basic_tags_test.go b/ec/acc/deployment_basic_tags_test.go index d1bf1a6a0..b431de454 100644 --- a/ec/acc/deployment_basic_tags_test.go +++ b/ec/acc/deployment_basic_tags_test.go @@ -42,29 +42,22 @@ func TestAccDeployment_basic_tags(t *testing.T) { fourthConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, fourthCfg, randomName, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a deployment with tags. 
Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), // Tags 
resource.TestCheckResourceAttr(resName, "tags.%", "2"), @@ -76,21 +69,14 @@ func TestAccDeployment_basic_tags(t *testing.T) { // Remove a tag. Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + 
resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), // Tags resource.TestCheckResourceAttr(resName, "tags.%", "1"), @@ -101,21 +87,14 @@ func TestAccDeployment_basic_tags(t *testing.T) { // Remove the tags block. Config: thirdConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), // Tags resource.TestCheckResourceAttr(resName, "tags.%", "0"), @@ -125,21 +104,14 @@ func TestAccDeployment_basic_tags(t *testing.T) { // Add the tags block with a single tag. Config: fourthConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + 
resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), // Tags resource.TestCheckResourceAttr(resName, "tags.%", "1"), diff --git a/ec/acc/deployment_basic_test.go b/ec/acc/deployment_basic_test.go index ec331e744..9115e25e9 100644 --- a/ec/acc/deployment_basic_test.go +++ b/ec/acc/deployment_basic_test.go @@ -42,27 +42,26 @@ func TestAccDeployment_basic_tf(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, Check: checkBasicDeploymentResource(resName, randomName, deploymentVersion, resource.TestCheckResourceAttr(resName, "alias", randomAlias), - resource.TestCheckResourceAttr(resName, "apm.0.config.#", "0"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.config.#", "0"), - resource.TestCheckResourceAttr(resName, "traffic_filter.#", "0"), + resource.TestCheckNoResourceAttr(resName, "apm.config"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search.config"), + resource.TestCheckNoResourceAttr(resName, "traffic_filter"), // Ensure at least 1 account is trusted (self). 
- resource.TestCheckResourceAttr(resName, "elasticsearch.0.trust_account.#", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.trust_account.#", "1"), ), }, { Config: cfgWithTrafficFilter, Check: checkBasicDeploymentResource(resName, randomName, deploymentVersion, // Ensure at least 1 account is trusted (self). It isn't deleted. - resource.TestCheckResourceAttr(resName, "elasticsearch.0.trust_account.#", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.trust_account.#", "1"), resource.TestCheckResourceAttr(resName, "traffic_filter.#", "1"), ), }, @@ -76,9 +75,7 @@ func TestAccDeployment_basic_tf(t *testing.T) { { Config: cfg, Check: checkBasicDeploymentResource(resName, randomName, deploymentVersion, - resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.#", "0"), resource.TestCheckResourceAttr(resName, "traffic_filter.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.0.config.#", "0"), ), }, }, @@ -88,9 +85,9 @@ func TestAccDeployment_basic_tf(t *testing.T) { func TestAccDeployment_basic_config(t *testing.T) { resName := "ec_deployment.basic" randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - startCfg := "testdata/deployment_basic_settings_config_1.tf" + importCfg := "testdata/deployment_basic_settings_config_import.tf" settingsConfig := "testdata/deployment_basic_settings_config_2.tf" - cfg := fixtureAccDeploymentResourceBasicWithApps(t, startCfg, randomName, getRegion(), defaultTemplate) + cfg := fixtureAccDeploymentResourceBasicWithApps(t, importCfg, randomName, getRegion(), defaultTemplate) settingsConfigCfg := fixtureAccDeploymentResourceBasicWithApps(t, settingsConfig, randomName, getRegion(), defaultTemplate) deploymentVersion, err := latestStackVersion() if err != nil { @@ -98,31 +95,27 @@ func TestAccDeployment_basic_config(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - 
CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: settingsConfigCfg, Check: checkBasicDeploymentResource(resName, randomName, deploymentVersion, - resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.0.user_settings_yaml", "action.auto_create_index: true"), - resource.TestCheckResourceAttr(resName, "apm.0.config.0.debug_enabled", "true"), - resource.TestCheckResourceAttr(resName, "apm.0.config.0.user_settings_json", `{"apm-server.rum.enabled":true}`), - resource.TestCheckResourceAttr(resName, "kibana.0.config.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.config.0.user_settings_yaml", "csp.warnLegacyBrowsers: true"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.config.#", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.config.0.user_settings_yaml", "# comment"), + resource.TestCheckResourceAttr(resName, "elasticsearch.config.user_settings_yaml", "action.auto_create_index: true"), + resource.TestCheckResourceAttr(resName, "apm.config.debug_enabled", "true"), + resource.TestCheckResourceAttr(resName, "apm.config.user_settings_json", `{"apm-server.rum.enabled":true}`), + resource.TestCheckResourceAttr(resName, "kibana.config.user_settings_yaml", "csp.warnLegacyBrowsers: true"), + resource.TestCheckResourceAttr(resName, "enterprise_search.config.user_settings_yaml", "# comment"), ), }, { Config: cfg, Check: checkBasicDeploymentResource(resName, randomName, deploymentVersion, - resource.TestCheckResourceAttr(resName, "apm.0.config.#", "1"), - // The config block is unset in the configuration so it disappears from the state. 
- resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.0.config.0.debug_enabled", "false"), - resource.TestCheckResourceAttr(resName, "kibana.0.config.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.config.#", "0"), + resource.TestCheckResourceAttr(resName, "apm.config.%", "0"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.config.user_settings_yaml"), + resource.TestCheckResourceAttr(resName, "kibana.config.%", "0"), + resource.TestCheckResourceAttr(resName, "enterprise_search.config.%", "0"), ), }, // Import resource without complex ID @@ -194,32 +187,28 @@ func checkBasicDeploymentResource(resName, randomDeploymentName, deploymentVersi testAccCheckDeploymentExists(resName), resource.TestCheckResourceAttr(resName, "name", randomDeploymentName), resource.TestCheckResourceAttr(resName, "region", getRegion()), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.region", getRegion()), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.region", getRegion()), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), resource.TestCheckResourceAttrSet(resName, "apm_secret_token"), resource.TestCheckResourceAttrSet(resName, "elasticsearch_username"), resource.TestCheckResourceAttrSet(resName, "elasticsearch_password"), - resource.TestCheckResourceAttrSet(resName, "apm.0.http_endpoint"), - resource.TestCheckResourceAttrSet(resName, "apm.0.https_endpoint"), - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.region", getRegion()), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.http_endpoint"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.https_endpoint"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.region", getRegion()), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.http_endpoint"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.https_endpoint"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.region", getRegion()), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.http_endpoint"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.https_endpoint"), + resource.TestCheckResourceAttrSet(resName, "apm.http_endpoint"), + resource.TestCheckResourceAttrSet(resName, "apm.https_endpoint"), + resource.TestCheckResourceAttr(resName, "elasticsearch.region", getRegion()), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.http_endpoint"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.https_endpoint"), + resource.TestCheckResourceAttr(resName, "kibana.region", getRegion()), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + 
resource.TestCheckResourceAttrSet(resName, "kibana.http_endpoint"), + resource.TestCheckResourceAttrSet(resName, "kibana.https_endpoint"), + resource.TestCheckResourceAttr(resName, "enterprise_search.region", getRegion()), + resource.TestCheckResourceAttr(resName, "enterprise_search.size", "2g"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "enterprise_search.http_endpoint"), + resource.TestCheckResourceAttrSet(resName, "enterprise_search.https_endpoint"), }, checks...)...) } diff --git a/ec/acc/deployment_ccs_test.go b/ec/acc/deployment_ccs_test.go index 5845b7b56..e51b82e8c 100644 --- a/ec/acc/deployment_ccs_test.go +++ b/ec/acc/deployment_ccs_test.go @@ -43,9 +43,9 @@ func TestAccDeployment_ccs(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, ccsRandomName, getRegion(), ccsTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a CCS deployment with the default settings. @@ -53,50 +53,43 @@ func TestAccDeployment_ccs(t *testing.T) { Check: resource.ComposeAggregateTestCheckFunc( // CCS Checks - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.topology.0.instance_configuration_id"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.topology.hot_content.instance_configuration_id"), // CCS defaults to 1g. 
- resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.size_resource", "memory"), // Remote cluster settings - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.remote_cluster.#", "3"), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.remote_cluster.0.deployment_id"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.remote_cluster.0.alias", fmt.Sprint(sourceRandomName, "-0")), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.remote_cluster.1.deployment_id"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.remote_cluster.1.alias", fmt.Sprint(sourceRandomName, "-1")), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.remote_cluster.2.deployment_id"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.remote_cluster.2.alias", fmt.Sprint(sourceRandomName, "-2")), - - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(ccsResName, "kibana.#", "0"), - resource.TestCheckResourceAttr(ccsResName, "apm.#", "0"), - 
resource.TestCheckResourceAttr(ccsResName, "enterprise_search.#", "0"), - + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.#", "3"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.remote_cluster.0.deployment_id"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.0.alias", fmt.Sprint(sourceRandomName, "-0")), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.remote_cluster.1.deployment_id"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.1.alias", fmt.Sprint(sourceRandomName, "-1")), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.remote_cluster.2.deployment_id"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.2.alias", fmt.Sprint(sourceRandomName, "-2")), + + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_data"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ingest"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_master"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ml"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.zone_count", "1"), + resource.TestCheckNoResourceAttr(ccsResName, "kibana"), + resource.TestCheckNoResourceAttr(ccsResName, "apm"), + resource.TestCheckNoResourceAttr(ccsResName, "enterprise_search"), // Source Checks - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.size", "1g"), - 
resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(sourceResName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(sourceResName, "kibana.#", "0"), - resource.TestCheckResourceAttr(sourceResName, "apm.#", "0"), - resource.TestCheckResourceAttr(sourceResName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(sourceResName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(sourceResName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_data"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ingest"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_master"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ml"), + resource.TestCheckResourceAttrSet(sourceResName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(sourceResName, "elasticsearch.topology.hot_content.zone_count", "1"), + resource.TestCheckNoResourceAttr(sourceResName, "kibana"), + 
resource.TestCheckNoResourceAttr(sourceResName, "apm"), + resource.TestCheckNoResourceAttr(sourceResName, "enterprise_search"), ), }, { @@ -104,29 +97,25 @@ func TestAccDeployment_ccs(t *testing.T) { Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( // Changes. - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.size_resource", "memory"), - - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.remote_cluster.#", "0"), - - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(ccsResName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(ccsResName, "kibana.#", "1"), - resource.TestCheckResourceAttr(ccsResName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(ccsResName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(ccsResName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(ccsResName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(ccsResName, "kibana.0.topology.0.size_resource", "memory"), - 
resource.TestCheckResourceAttr(ccsResName, "apm.#", "0"), - resource.TestCheckResourceAttr(ccsResName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.size_resource", "memory"), + + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.remote_cluster.#", "0"), + + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_data"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ingest"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_master"), + resource.TestCheckNoResourceAttr(ccsResName, "elasticsearch.topology.hot_content.node_type_ml"), + + resource.TestCheckResourceAttrSet(ccsResName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(ccsResName, "elasticsearch.topology.hot_content.zone_count", "1"), + resource.TestCheckResourceAttr(ccsResName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(ccsResName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(ccsResName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(ccsResName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(ccsResName, "apm"), + resource.TestCheckNoResourceAttr(ccsResName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_checks_test.go b/ec/acc/deployment_checks_test.go index 5ae87ad86..c4a4cac96 100644 --- a/ec/acc/deployment_checks_test.go +++ b/ec/acc/deployment_checks_test.go @@ -20,11 +20,12 @@ package acc import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/elastic/cloud-sdk-go/pkg/api" 
"github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deputil" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func testAccCheckDeploymentExists(name string) resource.TestCheckFunc { diff --git a/ec/acc/deployment_compute_optimized_test.go b/ec/acc/deployment_compute_optimized_test.go index a1c7c322b..7c9feb952 100644 --- a/ec/acc/deployment_compute_optimized_test.go +++ b/ec/acc/deployment_compute_optimized_test.go @@ -33,65 +33,45 @@ func TestAccDeployment_computeOptimized(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), computeOpTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a Compute Optimized deployment with the default settings. 
Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, 
"elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { // Change the Elasticsearch topology size and add APM instance. Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, 
"kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + 
resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_dedicated_test.go b/ec/acc/deployment_dedicated_test.go index 50d11f2bd..ff3e3af7a 100644 --- a/ec/acc/deployment_dedicated_test.go +++ b/ec/acc/deployment_dedicated_test.go @@ -31,40 +31,35 @@ func TestAccDeployment_dedicated_coordinating(t *testing.T) { cfg := fixtureAccDeploymentResourceBasicDefaults(t, startCfg, randomName, getRegion(), hotWarmTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a deployment with dedicated coordinating. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "3"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "coordinating"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "1"), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.id", "warm"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.2.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size_resource", "memory"), - - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.coordinating.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.coordinating.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.coordinating.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.coordinating.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.coordinating.size_resource", "memory"), + + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, 
"elasticsearch.topology.warm.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, @@ -78,48 +73,42 @@ func TestAccDeployment_dedicated_master(t *testing.T) { cfg := fixtureAccDeploymentResourceBasicDefaults(t, startCfg, randomName, getRegion(), hotWarmTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a deployment with dedicated master nodes. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "4"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.2.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.3.instance_configuration_id"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "cold"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, 
"elasticsearch.0.topology.0.zone_count", "1"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "3"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.id", "master"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.2.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.2.zone_count", "3"), - - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.id", "warm"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.3.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.3.zone_count", "2"), - - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.cold.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.master.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, 
"elasticsearch.topology.warm.instance_configuration_id"), + + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.cold.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.cold.zone_count", "1"), + + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "3"), + + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.master.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.master.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.master.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.master.zone_count", "3"), + + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "2"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_destroy_test.go b/ec/acc/deployment_destroy_test.go index e3817a613..5b70f5eff 100644 --- a/ec/acc/deployment_destroy_test.go +++ b/ec/acc/deployment_destroy_test.go @@ -20,9 +20,10 @@ package acc import ( "fmt" + 
"github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func testAccDeploymentDestroy(s *terraform.State) error { diff --git a/ec/acc/deployment_docker_image_override_test.go b/ec/acc/deployment_docker_image_override_test.go index f748a221a..be8144739 100644 --- a/ec/acc/deployment_docker_image_override_test.go +++ b/ec/acc/deployment_docker_image_override_test.go @@ -44,17 +44,17 @@ func TestAccDeployment_docker_image_override(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfgF("testdata/deployment_docker_image_override.tf"), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.0.docker_image", "docker.elastic.co/cloud-ci/elasticsearch:7.15.0-SNAPSHOT"), - resource.TestCheckResourceAttr(resName, "kibana.0.config.0.docker_image", "docker.elastic.co/cloud-ci/kibana:7.15.0-SNAPSHOT"), - resource.TestCheckResourceAttr(resName, "apm.0.config.0.docker_image", "docker.elastic.co/cloud-ci/apm:7.15.0-SNAPSHOT"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.config.0.docker_image", "docker.elastic.co/cloud-ci/enterprise-search:7.15.0-SNAPSHOT"), + resource.TestCheckResourceAttr(resName, "elasticsearch.config.docker_image", "docker.elastic.co/cloud-ci/elasticsearch:7.15.0-SNAPSHOT"), + resource.TestCheckResourceAttr(resName, "kibana.config.docker_image", "docker.elastic.co/cloud-ci/kibana:7.15.0-SNAPSHOT"), + resource.TestCheckResourceAttr(resName, "apm.config.docker_image", 
"docker.elastic.co/cloud-ci/apm:7.15.0-SNAPSHOT"), + resource.TestCheckResourceAttr(resName, "enterprise_search.config.docker_image", "docker.elastic.co/cloud-ci/enterprise-search:7.15.0-SNAPSHOT"), ), }, }, diff --git a/ec/acc/deployment_elasticsearch_kesytore_destroy_test.go b/ec/acc/deployment_elasticsearch_kesytore_destroy_test.go index 6a4332355..6dea775b3 100644 --- a/ec/acc/deployment_elasticsearch_kesytore_destroy_test.go +++ b/ec/acc/deployment_elasticsearch_kesytore_destroy_test.go @@ -20,9 +20,10 @@ package acc import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func testAccDeploymentElasticsearchKeystoreDestroy(s *terraform.State) error { diff --git a/ec/acc/deployment_elasticsearch_keystore_test.go b/ec/acc/deployment_elasticsearch_keystore_test.go index d2e97ece0..9d45d960b 100644 --- a/ec/acc/deployment_elasticsearch_keystore_test.go +++ b/ec/acc/deployment_elasticsearch_keystore_test.go @@ -21,19 +21,17 @@ import ( "fmt" "testing" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/elastic/cloud-sdk-go/pkg/util/slice" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/elastic/cloud-sdk-go/pkg/multierror" ) func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { var previousID, currentID string resType := "ec_deployment_elasticsearch_keystore" - deploymentResName := "ec_deployment.keystore" firstResName := resType + ".test" secondResName := resType + ".gcs_creds" randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) @@ -49,8 +47,8 @@ func TestAccDeploymentElasticsearchKeystore_full(t 
*testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: resource.ComposeAggregateTestCheckFunc( testAccDeploymentDestroy, testAccDeploymentElasticsearchKeystoreDestroy, @@ -68,8 +66,6 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { resource.TestCheckResourceAttr(secondResName, "value", "{\n \"type\": \"service_account\",\n \"project_id\": \"project-id\",\n \"private_key_id\": \"key-id\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"service-account-email\",\n \"client_id\": \"client-id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/service-account-email\"\n}"), resource.TestCheckResourceAttr(secondResName, "as_file", "false"), resource.TestCheckResourceAttrSet(secondResName, "deployment_id"), - - checkExpectedKeystoreKeysExist(deploymentResName, "xpack.notification.slack.account.hello.secure_url", "gcs.client.secondary.credentials_file"), ), }, { @@ -86,8 +82,6 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { resource.TestCheckResourceAttr(secondResName, "value", "{\n \"type\": \"service_account\",\n \"project_id\": \"project-id\",\n \"private_key_id\": \"key-id\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"service-account-email\",\n \"client_id\": \"client-id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": 
\"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/service-account-email\"\n}"), resource.TestCheckResourceAttr(secondResName, "as_file", "false"), resource.TestCheckResourceAttrSet(secondResName, "deployment_id"), - - checkExpectedKeystoreKeysExist(deploymentResName, "xpack.notification.slack.account.hello.secure_url", "gcs.client.secondary.credentials_file"), ), }, { @@ -104,8 +98,6 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { resource.TestCheckResourceAttr(secondResName, "value", "{\n \"type\": \"service_account\",\n \"project_id\": \"project-id\",\n \"private_key_id\": \"key-id\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"service-account-email\",\n \"client_id\": \"client-id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/service-account-email\"\n}"), resource.TestCheckResourceAttr(secondResName, "as_file", "false"), resource.TestCheckResourceAttrSet(secondResName, "deployment_id"), - - checkExpectedKeystoreKeysExist(deploymentResName, "xpack.notification.slack.account.hello.secure_urla", "gcs.client.secondary.credentials_file"), ), }, { @@ -126,53 +118,62 @@ func TestAccDeploymentElasticsearchKeystore_full(t *testing.T) { }) } -func checkExpectedKeystoreKeysExist(deploymentResource string, expectedKeys ...string) resource.TestCheckFunc { - return func(s *terraform.State) error { - client, err := newAPI() - if err != nil { - return err - } - - deployment, ok := s.RootModule().Resources[deploymentResource] - if !ok { - return fmt.Errorf("Not found: %s", deploymentResource) - } - - deploymentID := deployment.Primary.ID - - keystoreContents, err := 
eskeystoreapi.Get(eskeystoreapi.GetParams{ - API: client, - DeploymentID: deploymentID, - }) - if err != nil { - return err - } - - var missingKeys, extraKeys []string - for _, expectedKey := range expectedKeys { - if _, ok := keystoreContents.Secrets[expectedKey]; !ok { - missingKeys = append(missingKeys, expectedKey) - } - } +func TestAccDeploymentElasticsearchKeystore_UpgradeFrom0_4_1(t *testing.T) { + t.Skip("skip until `ec_deployment` state upgrade is implemented") - for key := range keystoreContents.Secrets { - if !slice.HasString(expectedKeys, key) { - extraKeys = append(extraKeys, key) - } - } + resType := "ec_deployment_elasticsearch_keystore" + firstResName := resType + ".test" + secondResName := resType + ".gcs_creds" + randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + startCfg := "testdata/deployment_elasticsearch_keystore_1_041.tf" + migratedCfg := "testdata/deployment_elasticsearch_keystore_1_migrated.tf" - mErr := multierror.NewPrefixed("unexpected keystore contents") + cfgF := func(cfg string) string { + return fixtureAccDeploymentResourceBasic( + t, cfg, randomName, getRegion(), defaultTemplate, + ) + } - if len(missingKeys) > 0 { - mErr = mErr.Append(fmt.Errorf("keys missing from the deployment keystore %v", missingKeys)) - } + resource.ParallelTest(t, resource.TestCase{ + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "ec": { + VersionConstraint: "0.4.1", + Source: "elastic/ec", + }, + }, + Config: cfgF(startCfg), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(firstResName, "setting_name", "xpack.notification.slack.account.hello.secure_url"), + resource.TestCheckResourceAttr(firstResName, "value", "hella"), + resource.TestCheckResourceAttr(firstResName, "as_file", "false"), + resource.TestCheckResourceAttrSet(firstResName, "deployment_id"), - if len(extraKeys) > 0 { - mErr = mErr.Append(fmt.Errorf("extra keys present in the 
deployment keystore: %v", extraKeys)) - } + resource.TestCheckResourceAttr(secondResName, "setting_name", "gcs.client.secondary.credentials_file"), + resource.TestCheckResourceAttr(secondResName, "value", "{\n \"type\": \"service_account\",\n \"project_id\": \"project-id\",\n \"private_key_id\": \"key-id\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"service-account-email\",\n \"client_id\": \"client-id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": \"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/service-account-email\"\n}"), + resource.TestCheckResourceAttr(secondResName, "as_file", "false"), + resource.TestCheckResourceAttrSet(secondResName, "deployment_id"), + ), + }, + { + PlanOnly: true, + ProtoV6ProviderFactories: testAccProviderFactory, + Config: cfgF(migratedCfg), + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(firstResName, "setting_name", "xpack.notification.slack.account.hello.secure_url"), + resource.TestCheckResourceAttr(firstResName, "value", "hella"), + resource.TestCheckResourceAttr(firstResName, "as_file", "false"), + resource.TestCheckResourceAttrSet(firstResName, "deployment_id"), - return mErr.ErrorOrNil() - } + resource.TestCheckResourceAttr(secondResName, "setting_name", "gcs.client.secondary.credentials_file"), + resource.TestCheckResourceAttr(secondResName, "value", "{\n \"type\": \"service_account\",\n \"project_id\": \"project-id\",\n \"private_key_id\": \"key-id\",\n \"private_key\": \"-----BEGIN PRIVATE KEY-----\\nprivate-key\\n-----END PRIVATE KEY-----\\n\",\n \"client_email\": \"service-account-email\",\n \"client_id\": \"client-id\",\n \"auth_uri\": \"https://accounts.google.com/o/oauth2/auth\",\n \"token_uri\": 
\"https://accounts.google.com/o/oauth2/token\",\n \"auth_provider_x509_cert_url\": \"https://www.googleapis.com/oauth2/v1/certs\",\n \"client_x509_cert_url\": \"https://www.googleapis.com/robot/v1/metadata/x509/service-account-email\"\n}"), + resource.TestCheckResourceAttr(secondResName, "as_file", "false"), + resource.TestCheckResourceAttrSet(secondResName, "deployment_id"), + ), + }, + }, + }) } func checkESKeystoreResourceID(resourceName string, id *string) resource.TestCheckFunc { diff --git a/ec/acc/deployment_emptyconf_test.go b/ec/acc/deployment_emptyconf_test.go index 2881fb507..06888615f 100644 --- a/ec/acc/deployment_emptyconf_test.go +++ b/ec/acc/deployment_emptyconf_test.go @@ -36,21 +36,17 @@ func TestAccDeployment_emptyconfig(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfgF(startCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.config.#", "0"), + // config has 6 attributes + resource.TestCheckResourceAttr(resName, "elasticsearch.config.%", "6"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.config.user_settings_yaml"), ), - // Since the configuration specifies a `config {}` block but - // the setting itself is `null`, the config {} block will be - // set to empty and will cause the plan to always have a diff: - // + config {}. 
- ExpectNonEmptyPlan: true, }, }, }) diff --git a/ec/acc/deployment_enterprise_search_test.go b/ec/acc/deployment_enterprise_search_test.go index b5364a9c9..17d9b1388 100644 --- a/ec/acc/deployment_enterprise_search_test.go +++ b/ec/acc/deployment_enterprise_search_test.go @@ -33,70 +33,48 @@ func TestAccDeployment_enterpriseSearch(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), enterpriseSearchTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create an Enterprise Search deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, 
"elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "apm"), + 
resource.TestCheckResourceAttr(resName, "enterprise_search.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "enterprise_search.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size", "2g"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size_resource", "memory"), ), }, { // Change the Elasticsearch topology size. Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), 
- resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "enterprise_search.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "enterprise_search.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckResourceAttr(resName, "enterprise_search.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "enterprise_search.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size", "2g"), + resource.TestCheckResourceAttr(resName, "enterprise_search.size_resource", "memory"), ), }, }, diff --git a/ec/acc/deployment_extension_basic_test.go b/ec/acc/deployment_extension_basic_test.go index 0a754bcfd..84a8483ec 100644 --- a/ec/acc/deployment_extension_basic_test.go +++ 
b/ec/acc/deployment_extension_basic_test.go @@ -34,9 +34,9 @@ func TestAccDeploymentExtension_basic(t *testing.T) { cfg2 := fixtureAccExtensionBasicWithTF(t, "testdata/extension_basic.tf", randomName, "updated desc") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccExtensionDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccExtensionDestroy, Steps: []resource.TestStep{ { Config: cfg, @@ -65,6 +65,48 @@ func TestAccDeploymentExtension_basic(t *testing.T) { }) } +func TestAccDeploymentExtension_UpgradeFrom0_4_1(t *testing.T) { + t.Skip("skip until `ec_deployment` state upgrade is implemented") + + resName := "ec_deployment_extension.my_extension" + randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + + cfg := fixtureAccExtensionBasicWithTF(t, "testdata/extension_basic.tf", randomName, "desc") + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "ec": { + VersionConstraint: "0.4.1", + Source: "elastic/ec", + }, + }, + Config: cfg, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resName, "name", randomName), + resource.TestCheckResourceAttr(resName, "version", "*"), + resource.TestCheckResourceAttr(resName, "description", "desc"), + resource.TestCheckResourceAttr(resName, "extension_type", "bundle"), + ), + }, + { + PlanOnly: true, + ProtoV6ProviderFactories: testAccProviderFactory, + Config: cfg, + Check: resource.ComposeAggregateTestCheckFunc( + resource.TestCheckResourceAttr(resName, "name", randomName), + resource.TestCheckResourceAttr(resName, "version", "*"), + resource.TestCheckResourceAttr(resName, "description", "desc"), + 
resource.TestCheckResourceAttr(resName, "extension_type", "bundle"), + ), + }, + }, + }) +} + func fixtureAccExtensionBasicWithTF(t *testing.T, tfFileName, extensionName, description string) string { t.Helper() diff --git a/ec/acc/deployment_extension_bundle_file_test.go b/ec/acc/deployment_extension_bundle_file_test.go index c4bcbeffb..23f71a039 100644 --- a/ec/acc/deployment_extension_bundle_file_test.go +++ b/ec/acc/deployment_extension_bundle_file_test.go @@ -27,10 +27,11 @@ import ( "path/filepath" "testing" - "github.com/elastic/cloud-sdk-go/pkg/client/extensions" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/elastic/cloud-sdk-go/pkg/client/extensions" ) func TestAccDeploymentExtension_bundleFile(t *testing.T) { @@ -43,9 +44,9 @@ func TestAccDeploymentExtension_bundleFile(t *testing.T) { cfg := fixtureAccExtensionBundleWithTF(t, "testdata/extension_bundle_file.tf", filePath, randomName, "desc") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccExtensionDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccExtensionDestroy, Steps: []resource.TestStep{ { PreConfig: func() { writeFile(t, filePath, "extension.txt", "foo") }, diff --git a/ec/acc/deployment_extension_destroy_test.go b/ec/acc/deployment_extension_destroy_test.go index 140e9f153..a715b506b 100644 --- a/ec/acc/deployment_extension_destroy_test.go +++ b/ec/acc/deployment_extension_destroy_test.go @@ -20,9 +20,10 @@ package acc import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/elastic/cloud-sdk-go/pkg/api/apierror" "github.com/elastic/cloud-sdk-go/pkg/client/extensions" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func 
testAccExtensionDestroy(s *terraform.State) error { diff --git a/ec/acc/deployment_extension_plugin_download_test.go b/ec/acc/deployment_extension_plugin_download_test.go index dc3fdd5ea..d73175cd4 100644 --- a/ec/acc/deployment_extension_plugin_download_test.go +++ b/ec/acc/deployment_extension_plugin_download_test.go @@ -34,9 +34,9 @@ func TestAccDeploymentExtension_pluginDownload(t *testing.T) { cfg := fixtureAccExtensionBundleDownloadWithTF(t, "testdata/extension_plugin_download.tf", randomName, downloadURL) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccExtensionDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccExtensionDestroy, Steps: []resource.TestStep{ { Config: cfg, diff --git a/ec/acc/deployment_failed_upgrade_retry_test.go b/ec/acc/deployment_failed_upgrade_retry_test.go index 36c3f25c8..9334cc706 100644 --- a/ec/acc/deployment_failed_upgrade_retry_test.go +++ b/ec/acc/deployment_failed_upgrade_retry_test.go @@ -23,7 +23,7 @@ import ( "regexp" "testing" - semver "github.com/blang/semver/v4" + "github.com/blang/semver/v4" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) @@ -32,9 +32,9 @@ func TestAccDeployment_failed_upgrade_retry(t *testing.T) { var esCreds creds resName := "ec_deployment.upgrade_retry" resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: fixtureDeploymentDefaults(t, "testdata/deployment_upgrade_retry_1.tf"), @@ -47,7 +47,7 @@ func TestAccDeployment_failed_upgrade_retry(t *testing.T) { // 
Creates an Elasticsearch index that will make the kibana upgrade fail. PreConfig: createIndex(t, &esCreds, ".kibana_2"), Config: fixtureDeploymentDefaults(t, "testdata/deployment_upgrade_retry_2.tf"), - ExpectError: regexp.MustCompile(`\[kibana\].*Plan change failed.*`), + ExpectError: regexp.MustCompile(`\[kibana\].*Plan[ |\t|\n]+change[ |\t|\n]+failed.*`), Check: resource.ComposeAggregateTestCheckFunc( checkMajorMinorVersion(t, resName, 7, 10), ), diff --git a/ec/acc/deployment_hotwarm_test.go b/ec/acc/deployment_hotwarm_test.go index 5a800a2b3..8d4c1246b 100644 --- a/ec/acc/deployment_hotwarm_test.go +++ b/ec/acc/deployment_hotwarm_test.go @@ -38,41 +38,29 @@ func TestAccDeployment_hotwarm(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasic(t, secondCfg, randomName, getRegion(), hotWarmTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a Hot / Warm deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "2"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.instance_configuration_id"), // Hot Warm defaults to 4g. 
- resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "4g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "4g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "warm"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, 
"elasticsearch.0.topology.1.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "2"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { @@ -80,32 +68,20 @@ func TestAccDeployment_hotwarm(t *testing.T) { Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( // Changes. - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, 
"elasticsearch.topology.warm.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "2"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "warm"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.instance_configuration_id"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.node_roles.#"), + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_integrations_server_test.go b/ec/acc/deployment_integrations_server_test.go index 5cf172b9d..f9346fbda 100644 --- a/ec/acc/deployment_integrations_server_test.go +++ b/ec/acc/deployment_integrations_server_test.go @@ -33,44 +33,30 @@ func TestAccDeployment_integrationsServer(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create an Integrations Server deployment with the default settings. 
Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.#", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttrSet(resName, "integrations_server.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttrSet(resName, "integrations_server.0.apm_https_endpoint"), - resource.TestCheckResourceAttrSet(resName, "integrations_server.0.fleet_https_endpoint"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "integrations_server.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "integrations_server.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "integrations_server.size", "1g"), + resource.TestCheckResourceAttr(resName, "integrations_server.size_resource", "memory"), ), }, { // Change the Integrations Server topology (increase zone count to 2). 
Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.#", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.#", "1"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttrSet(resName, "integrations_server.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "integrations_server.0.topology.0.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttr(resName, "integrations_server.zone_count", "2"), + resource.TestCheckResourceAttrSet(resName, "integrations_server.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "integrations_server.size", "1g"), + resource.TestCheckResourceAttr(resName, "integrations_server.size_resource", "memory"), ), }, }, diff --git a/ec/acc/deployment_memory_optimized_test.go b/ec/acc/deployment_memory_optimized_test.go index 5c58756f7..ac3c1a1e4 100644 --- a/ec/acc/deployment_memory_optimized_test.go +++ b/ec/acc/deployment_memory_optimized_test.go @@ -33,65 +33,45 @@ func TestAccDeployment_memoryOptimized(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), memoryOpTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: 
testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a Memory Optimized deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { // Change the Elasticsearch topology size and add APM instance. Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, 
"elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + 
resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_observability_self_test.go b/ec/acc/deployment_observability_self_test.go index 375a2e83a..b326c0cc2 100644 --- a/ec/acc/deployment_observability_self_test.go +++ b/ec/acc/deployment_observability_self_test.go @@ -33,18 +33,18 @@ func TestAccDeployment_observability_createWithSelfObservability(t *testing.T) { config := fixtureAccDeploymentResourceSelfObs(t, configFile, randomName, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a deployment with observability-target 'self' // After creation, the target-deployment-id should be the id of the created deployment Config: config, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrPair(resName, "observability.0.deployment_id", resName, "id"), - resource.TestCheckResourceAttr(resName, "observability.0.metrics", "true"), - resource.TestCheckResourceAttr(resName, "observability.0.logs", "true"), + resource.TestCheckResourceAttr(resName, "observability.deployment_id", "self"), + resource.TestCheckResourceAttr(resName, "observability.metrics", "true"), + resource.TestCheckResourceAttr(resName, "observability.logs", "true"), ), }, }, diff --git a/ec/acc/deployment_observability_test.go b/ec/acc/deployment_observability_test.go index 0a23bc1de..ee9464a71 100644 --- a/ec/acc/deployment_observability_test.go +++ b/ec/acc/deployment_observability_test.go @@ -26,7 
+26,7 @@ import ( "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) -func TestAccDeployment_observability(t *testing.T) { +func TestAccDeployment_observability_first(t *testing.T) { resName := "ec_deployment.observability" secondResName := "ec_deployment.basic" randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) @@ -40,42 +40,41 @@ func TestAccDeployment_observability(t *testing.T) { fourthCfg := fixtureAccDeploymentResourceBasicObs(t, removeObsCfg, randomName, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrPair(resName, "observability.0.deployment_id", secondResName, "id"), - resource.TestCheckResourceAttr(resName, "observability.0.metrics", "true"), - resource.TestCheckResourceAttr(resName, "observability.0.logs", "true"), + resource.TestCheckResourceAttrPair(resName, "observability.deployment_id", secondResName, "id"), + resource.TestCheckResourceAttr(resName, "observability.metrics", "true"), + resource.TestCheckResourceAttr(resName, "observability.logs", "true"), ), }, { Config: secondCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrPair(resName, "observability.0.deployment_id", secondResName, "id"), - resource.TestCheckResourceAttr(resName, "observability.0.metrics", "false"), - resource.TestCheckResourceAttr(resName, "observability.0.logs", "true"), + resource.TestCheckResourceAttrPair(resName, "observability.deployment_id", secondResName, "id"), + resource.TestCheckResourceAttr(resName, "observability.metrics", "false"), + resource.TestCheckResourceAttr(resName, 
"observability.logs", "true"), ), }, { Config: thirdCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttrPair(resName, "observability.0.deployment_id", secondResName, "id"), - resource.TestCheckResourceAttr(resName, "observability.0.metrics", "true"), - resource.TestCheckResourceAttr(resName, "observability.0.logs", "false"), + resource.TestCheckResourceAttrPair(resName, "observability.deployment_id", secondResName, "id"), + resource.TestCheckResourceAttr(resName, "observability.metrics", "true"), + resource.TestCheckResourceAttr(resName, "observability.logs", "false"), ), }, { Config: fourthCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "observability.#", "0"), - resource.TestCheckNoResourceAttr(resName, "observability.0.deployment_id"), - resource.TestCheckNoResourceAttr(resName, "observability.0.metrics"), - resource.TestCheckNoResourceAttr(resName, "observability.0.logs"), - resource.TestCheckNoResourceAttr(resName, "observability.0.ref_id"), + resource.TestCheckNoResourceAttr(resName, "observability.deployment_id"), + resource.TestCheckNoResourceAttr(resName, "observability.metrics"), + resource.TestCheckNoResourceAttr(resName, "observability.logs"), + resource.TestCheckNoResourceAttr(resName, "observability.ref_id"), ), }, }, diff --git a/ec/acc/deployment_observability_tpl_test.go b/ec/acc/deployment_observability_tpl_test.go index 5d8d7da77..ddee3a6ac 100644 --- a/ec/acc/deployment_observability_tpl_test.go +++ b/ec/acc/deployment_observability_tpl_test.go @@ -33,68 +33,47 @@ func TestAccDeployment_observabilityTpl(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), observabilityTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + 
ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create an Observability deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - 
resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { // Change the Elasticsearch topology size. 
Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + 
resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_post_node_role_upgrade_test.go b/ec/acc/deployment_post_node_role_upgrade_test.go index 0869cdda9..358329165 100644 --- a/ec/acc/deployment_post_node_role_upgrade_test.go +++ b/ec/acc/deployment_post_node_role_upgrade_test.go @@ -37,48 +37,46 @@ func TestAccDeployment_post_node_roles(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfgF(startCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, 
"elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_data"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ingest"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_master"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ml"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + 
resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { Config: cfgF(upgradeVersionCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_data"), + 
resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ingest"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_master"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ml"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_pre_node_role_migration_test.go b/ec/acc/deployment_pre_node_role_migration_test.go index f20cd68ee..71882f5e7 100644 --- a/ec/acc/deployment_pre_node_role_migration_test.go +++ b/ec/acc/deployment_pre_node_role_migration_test.go @@ -38,83 +38,76 @@ func TestAccDeployment_pre_node_roles(t *testing.T) { } resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: cfgF(startCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", "false"), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_roles.#", "0"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_data", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ingest", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_master", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ml", "false"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_roles.#", "0"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { Config: cfgF(upgradeVersionCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", "true"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", "false"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_roles.#", "0"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_data", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ingest", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_master", "true"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ml", "false"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.node_roles.#", "0"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + 
resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { Config: cfgF(addWarmTopologyCfg), Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "2"), - // Hot - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_roles.#", "0"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "1g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_data"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ingest"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_master"), + 
resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.hot_content.node_type_ml"), // Warm - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.id", "warm"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.1.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.1.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.warm.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.warm.zone_count", "1"), + + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.warm.node_type_data"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.warm.node_type_ingest"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.warm.node_type_master"), + resource.TestCheckNoResourceAttr(resName, "elasticsearch.topology.warm.node_type_ml"), - resource.TestCheckResourceAttr(resName, "kibana.#", "0"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckNoResourceAttr(resName, 
"kibana"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_security_test.go b/ec/acc/deployment_security_test.go index 5535ad6e1..9fc27d324 100644 --- a/ec/acc/deployment_security_test.go +++ b/ec/acc/deployment_security_test.go @@ -33,65 +33,45 @@ func TestAccDeployment_security(t *testing.T) { secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, secondCfg, randomName, getRegion(), securityTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentDestroy, Steps: []resource.TestStep{ { // Create a Security deployment with the default settings. Config: cfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - 
resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "8g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "apm"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, { // Change the Elasticsearch topology size and add APM instance. 
Config: secondConfigCfg, Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "2g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "apm.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size", "1g"), - 
resource.TestCheckResourceAttr(resName, "apm.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size", "2g"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.size_resource", "memory"), + resource.TestCheckResourceAttrSet(resName, "elasticsearch.topology.hot_content.node_roles.#"), + resource.TestCheckResourceAttr(resName, "elasticsearch.topology.hot_content.zone_count", "2"), + resource.TestCheckResourceAttr(resName, "kibana.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "kibana.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "kibana.size", "1g"), + resource.TestCheckResourceAttr(resName, "kibana.size_resource", "memory"), + resource.TestCheckResourceAttr(resName, "apm.zone_count", "1"), + resource.TestCheckResourceAttrSet(resName, "apm.instance_configuration_id"), + resource.TestCheckResourceAttr(resName, "apm.size", "1g"), + resource.TestCheckResourceAttr(resName, "apm.size_resource", "memory"), + resource.TestCheckNoResourceAttr(resName, "enterprise_search"), ), }, }, diff --git a/ec/acc/deployment_snapshot_test.go b/ec/acc/deployment_snapshot_test.go index 739a96edb..1a9adc493 100644 --- a/ec/acc/deployment_snapshot_test.go +++ b/ec/acc/deployment_snapshot_test.go @@ -38,9 +38,9 @@ func TestAccDeployment_snapshot_restore(t *testing.T) { t.Skip("skipped due flakiness: https://github.com/elastic/terraform-provider-ec/issues/443") var esCreds creds resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: 
testAccDeploymentDestroy, Steps: []resource.TestStep{ { Config: fixtureDeploymentDefaults(t, "testdata/deployment_snapshot_1.tf"), @@ -80,7 +80,7 @@ func readEsCredentials(t *testing.T, esCreds *creds) resource.TestCheckFunc { continue } - esCreds.URL = rs.Primary.Attributes["elasticsearch.0.https_endpoint"] + esCreds.URL = rs.Primary.Attributes["elasticsearch.https_endpoint"] esCreds.User = rs.Primary.Attributes["elasticsearch_username"] esCreds.Pass = rs.Primary.Attributes["elasticsearch_password"] } diff --git a/ec/acc/deployment_sweep_test.go b/ec/acc/deployment_sweep_test.go index 01ab91e65..7909ea688 100644 --- a/ec/acc/deployment_sweep_test.go +++ b/ec/acc/deployment_sweep_test.go @@ -23,6 +23,8 @@ import ( "sync" "time" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/models" @@ -30,7 +32,6 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/plan" "github.com/elastic/cloud-sdk-go/pkg/plan/planutil" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func init() { diff --git a/ec/acc/deployment_template_migration_test.go b/ec/acc/deployment_template_migration_test.go deleted file mode 100644 index 9b01f3d9d..000000000 --- a/ec/acc/deployment_template_migration_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package acc - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" -) - -func TestAccDeployment_template_migration(t *testing.T) { - resName := "ec_deployment.compute_optimized" - randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) - basicCfg := "testdata/deployment_compute_optimized_1.tf" - region := getRegion() - cfg := fixtureAccDeploymentResourceBasicDefaults(t, basicCfg, randomName, region, computeOpTemplate) - secondConfigCfg := fixtureAccDeploymentResourceBasicDefaults(t, basicCfg, randomName, region, memoryOpTemplate) - - resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentDestroy, - Steps: []resource.TestStep{ - { - // Create a Compute Optimized deployment with the default settings. 
- Config: cfg, - Check: resource.ComposeAggregateTestCheckFunc( - resource.TestCheckResourceAttr(resName, "deployment_template_id", setDefaultTemplate(region, computeOpTemplate)), - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), - ), - }, - { - // Change the deployment to memory optimized - Config: secondConfigCfg, - Check: resource.ComposeAggregateTestCheckFunc( - 
resource.TestCheckResourceAttr(resName, "deployment_template_id", setDefaultTemplate(region, memoryOpTemplate)), - resource.TestCheckResourceAttr(resName, "elasticsearch.#", "1"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size", "8g"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_data", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ingest", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_master", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.node_type_ml", ""), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.id", "hot_content"), - resource.TestCheckResourceAttrSet(resName, "elasticsearch.0.topology.0.node_roles.#"), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.topology.0.zone_count", "2"), - resource.TestCheckResourceAttr(resName, "kibana.#", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.zone_count", "1"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.#", "1"), - resource.TestCheckResourceAttrSet(resName, "kibana.0.topology.0.instance_configuration_id"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size", "1g"), - resource.TestCheckResourceAttr(resName, "kibana.0.topology.0.size_resource", "memory"), - resource.TestCheckResourceAttr(resName, "apm.#", "0"), - resource.TestCheckResourceAttr(resName, "enterprise_search.#", "0"), - ), - }, - }, - }) -} diff --git a/ec/acc/deployment_traffic_filter_association_test.go b/ec/acc/deployment_traffic_filter_association_test.go index ba61bb3eb..e279f5a13 100644 --- 
a/ec/acc/deployment_traffic_filter_association_test.go +++ b/ec/acc/deployment_traffic_filter_association_test.go @@ -38,9 +38,9 @@ func TestAccDeploymentTrafficFilterAssociation_basic(t *testing.T) { updateConfigCfg := fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t, updateCfg, randomNameSecond, getRegion(), defaultTemplate) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentTrafficFilterDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, Steps: []resource.TestStep{ { // Expects a non-empty plan since "ec_deployment.traffic_filter" @@ -72,6 +72,56 @@ func TestAccDeploymentTrafficFilterAssociation_basic(t *testing.T) { }) } +func TestAccDeploymentTrafficFilterAssociation_UpgradeFrom0_4_1(t *testing.T) { + t.Skip("skip until `ec_deployment` state upgrade is implemented") + + resName := "ec_deployment_traffic_filter.tf_assoc" + resAssocName := "ec_deployment_traffic_filter_association.tf_assoc" + randomName := acctest.RandomWithPrefix(prefix) + startCfg := "testdata/deployment_traffic_filter_association_basic_041.tf" + ignoreChangesCfgFile := "testdata/deployment_traffic_filter_association_basic_ignore_changes.tf" + cfg := fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t, startCfg, randomName, getRegion(), defaultTemplate) + ignoreChangesCfg := fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t, ignoreChangesCfgFile, randomName, getRegion(), defaultTemplate) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "ec": { + VersionConstraint: "0.4.1", + Source: "elastic/ec", + }, + }, + // Expects a non-empty plan since 
"ec_deployment.traffic_filter" + // will have changes due to the traffic filter association. + ExpectNonEmptyPlan: true, + Config: cfg, + Check: checkBasicDeploymentTrafficFilterAssociationResource( + resName, resAssocName, randomName, + resource.TestCheckResourceAttr(resName, "include_by_default", "false"), + resource.TestCheckResourceAttr(resName, "type", "ip"), + resource.TestCheckResourceAttr(resName, "rule.#", "1"), + resource.TestCheckResourceAttr(resName, "rule.0.source", "0.0.0.0/0"), + ), + }, + { + PlanOnly: true, + ProtoV6ProviderFactories: testAccProviderFactory, + Config: ignoreChangesCfg, + Check: checkBasicDeploymentTrafficFilterAssociationResource( + resName, resAssocName, randomName, + resource.TestCheckResourceAttr(resName, "include_by_default", "false"), + resource.TestCheckResourceAttr(resName, "type", "ip"), + resource.TestCheckResourceAttr(resName, "rule.#", "1"), + resource.TestCheckResourceAttr(resName, "rule.0.source", "0.0.0.0/0"), + ), + }, + }, + }) +} + func fixtureAccDeploymentTrafficFilterResourceAssociationBasic(t *testing.T, fileName, name, region, depTpl string) string { t.Helper() diff --git a/ec/acc/deployment_traffic_filter_checks_test.go b/ec/acc/deployment_traffic_filter_checks_test.go index 5170283da..1e15360d6 100644 --- a/ec/acc/deployment_traffic_filter_checks_test.go +++ b/ec/acc/deployment_traffic_filter_checks_test.go @@ -20,10 +20,11 @@ package acc import ( "fmt" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" ) func testAccCheckDeploymentTrafficFilterExists(name string) resource.TestCheckFunc { diff --git a/ec/acc/deployment_traffic_filter_destroy_test.go b/ec/acc/deployment_traffic_filter_destroy_test.go index 
2297c1888..a9568c0ac 100644 --- a/ec/acc/deployment_traffic_filter_destroy_test.go +++ b/ec/acc/deployment_traffic_filter_destroy_test.go @@ -20,9 +20,10 @@ package acc import ( "fmt" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" ) func testAccDeploymentTrafficFilterDestroy(s *terraform.State) error { diff --git a/ec/acc/deployment_traffic_filter_sweep_test.go b/ec/acc/deployment_traffic_filter_sweep_test.go index eac607b54..54bd5241d 100644 --- a/ec/acc/deployment_traffic_filter_sweep_test.go +++ b/ec/acc/deployment_traffic_filter_sweep_test.go @@ -21,10 +21,11 @@ import ( "strings" "sync" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" ) func init() { diff --git a/ec/acc/deployment_traffic_filter_test.go b/ec/acc/deployment_traffic_filter_test.go index 3a6fc996e..c22898b8d 100644 --- a/ec/acc/deployment_traffic_filter_test.go +++ b/ec/acc/deployment_traffic_filter_test.go @@ -38,9 +38,9 @@ func TestAccDeploymentTrafficFilter_basic(t *testing.T) { updateLargeConfigCfg := fixtureAccDeploymentTrafficFilterResourceBasic(t, updateLargeCfg, randomName, getRegion()) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentTrafficFilterDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, Steps: []resource.TestStep{ { Config: cfg, @@ -96,9 +96,9 @@ func TestAccDeploymentTrafficFilter_azure(t *testing.T) { cfg := 
fixtureAccDeploymentTrafficFilterResourceBasic(t, startCfg, randomName, "azure-australiaeast") resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, - CheckDestroy: testAccDeploymentTrafficFilterDestroy, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, Steps: []resource.TestStep{ { Config: cfg, @@ -113,6 +113,48 @@ func TestAccDeploymentTrafficFilter_azure(t *testing.T) { }) } +func TestAccDeploymentTrafficFilter_UpgradeFrom0_4_1(t *testing.T) { + t.Skip("skip until `ec_deployment` state upgrade is implemented") + + resName := "ec_deployment_traffic_filter.basic" + randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) + startCfg := "testdata/deployment_traffic_filter_basic.tf" + cfg := fixtureAccDeploymentTrafficFilterResourceBasic(t, startCfg, randomName, getRegion()) + + resource.ParallelTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + CheckDestroy: testAccDeploymentTrafficFilterDestroy, + Steps: []resource.TestStep{ + { + ExternalProviders: map[string]resource.ExternalProvider{ + "ec": { + VersionConstraint: "0.4.1", + Source: "elastic/ec", + }, + }, + Config: cfg, + Check: checkBasicDeploymentTrafficFilterResource(resName, randomName, + resource.TestCheckResourceAttr(resName, "include_by_default", "false"), + resource.TestCheckResourceAttr(resName, "type", "ip"), + resource.TestCheckResourceAttr(resName, "rule.#", "1"), + resource.TestCheckResourceAttr(resName, "rule.0.source", "0.0.0.0/0"), + ), + }, + { + PlanOnly: true, + ProtoV6ProviderFactories: testAccProviderFactory, + Config: cfg, + Check: checkBasicDeploymentTrafficFilterResource(resName, randomName, + resource.TestCheckResourceAttr(resName, "include_by_default", "false"), + resource.TestCheckResourceAttr(resName, "type", "ip"), + resource.TestCheckResourceAttr(resName, 
"rule.#", "1"), + resource.TestCheckResourceAttr(resName, "rule.0.source", "0.0.0.0/0"), + ), + }, + }, + }) +} + func fixtureAccDeploymentTrafficFilterResourceBasic(t *testing.T, fileName, name, region string) string { t.Helper() b, err := os.ReadFile(fileName) diff --git a/ec/acc/deployment_with_extension_bundle_test.go b/ec/acc/deployment_with_extension_bundle_test.go index c05ab8e91..dabfdc618 100644 --- a/ec/acc/deployment_with_extension_bundle_test.go +++ b/ec/acc/deployment_with_extension_bundle_test.go @@ -23,10 +23,11 @@ import ( "path/filepath" "testing" - "github.com/elastic/cloud-sdk-go/pkg/multierror" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/acctest" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + + "github.com/elastic/cloud-sdk-go/pkg/multierror" ) func TestAccDeployment_withExtension(t *testing.T) { @@ -35,7 +36,10 @@ func TestAccDeployment_withExtension(t *testing.T) { randomName := prefix + acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) filePath := filepath.Join(os.TempDir(), "extension.zip") - defer os.Remove(filePath) + + // TODO: this causes the test to fail with the invalid file error + // however we need find a way to delete the temp file + // defer os.Remove(filePath) cfg := fixtureAccDeploymentWithExtensionBundle(t, "testdata/deployment_with_extension_bundle_file.tf", @@ -43,8 +47,8 @@ func TestAccDeployment_withExtension(t *testing.T) { ) resource.ParallelTest(t, resource.TestCase{ - PreCheck: func() { testAccPreCheck(t) }, - ProviderFactories: testAccProviderFactory, + PreCheck: func() { testAccPreCheck(t) }, + ProtoV6ProviderFactories: testAccProviderFactory, CheckDestroy: func(s *terraform.State) error { merr := multierror.NewPrefixed("checking resource with extension") @@ -67,8 +71,8 @@ func TestAccDeployment_withExtension(t *testing.T) { resource.TestCheckResourceAttr(extResName, "description", "desc"), 
resource.TestCheckResourceAttr(extResName, "extension_type", "bundle"), resource.TestCheckResourceAttr(extResName, "file_path", filePath), - resource.TestCheckResourceAttr(resName, "elasticsearch.0.extension.#", "1"), - resource.TestCheckTypeSetElemNestedAttrs(resName, "elasticsearch.0.extension.*", map[string]string{ + resource.TestCheckResourceAttr(resName, "elasticsearch.extension.#", "1"), + resource.TestCheckTypeSetElemNestedAttrs(resName, "elasticsearch.extension.*", map[string]string{ "type": "bundle", "name": randomName, }), diff --git a/ec/acc/testdata/datasource_deployment_basic.tf b/ec/acc/testdata/datasource_deployment_basic.tf index 4692944e1..59b324262 100644 --- a/ec/acc/testdata/datasource_deployment_basic.tf +++ b/ec/acc/testdata/datasource_deployment_basic.tf @@ -9,11 +9,13 @@ resource "ec_deployment" "basic_observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -25,21 +27,23 @@ resource "ec_deployment" "basic_datasource" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } - kibana {} + kibana = {} - apm {} + apm = {} - enterprise_search {} + enterprise_search = {} - observability { + observability = { deployment_id = ec_deployment.basic_observability.id } @@ -85,4 +89,4 @@ data "ec_deployments" "query" { depends_on = [ ec_deployment.basic_datasource, ] -} \ No newline at end of file +} diff --git a/ec/acc/testdata/datasource_stack_latest.tf b/ec/acc/testdata/datasource_stack_latest.tf index 11e87ea80..de873669e 100644 --- a/ec/acc/testdata/datasource_stack_latest.tf +++ 
b/ec/acc/testdata/datasource_stack_latest.tf @@ -2,4 +2,4 @@ data "ec_stack" "latest" { version_regex = "latest" lock = true region = "%s" -} \ No newline at end of file +} diff --git a/ec/acc/testdata/datasource_stack_regex.tf b/ec/acc/testdata/datasource_stack_regex.tf index 4f5a7624e..3d15ae5ce 100644 --- a/ec/acc/testdata/datasource_stack_regex.tf +++ b/ec/acc/testdata/datasource_stack_regex.tf @@ -1,4 +1,4 @@ data "ec_stack" "regex" { - version_regex = "7.0.?" + version_regex = "8.4.?" region = "%s" -} \ No newline at end of file +} diff --git a/ec/acc/testdata/datasource_tags.tf b/ec/acc/testdata/datasource_tags.tf index 78bf36c6b..a25c2769d 100644 --- a/ec/acc/testdata/datasource_tags.tf +++ b/ec/acc/testdata/datasource_tags.tf @@ -14,11 +14,13 @@ resource "ec_deployment" "tags" { "test_id" = "%s" } - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -31,4 +33,4 @@ data "ec_deployments" "tagfilter" { tags = { "test_id" = "%s" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_autoscaling_1.tf b/ec/acc/testdata/deployment_autoscaling_1.tf index c5db2fcef..6461d7d53 100644 --- a/ec/acc/testdata/deployment_autoscaling_1.tf +++ b/ec/acc/testdata/deployment_autoscaling_1.tf @@ -9,44 +9,45 @@ resource "ec_deployment" "autoscaling" { version = data.ec_stack.autoscaling.version deployment_template_id = "%s" - elasticsearch { + elasticsearch = { autoscale = "true" - topology { - id = "cold" - size = "0g" - zone_count = 1 - } + topology = { + "cold" = { + size = "0g" + zone_count = 1 + autoscaling = {} + } - topology { - id = "frozen" - size = "0g" - zone_count = 1 - } + "frozen" = { + size = "0g" + zone_count = 1 + autoscaling = {} + } - topology { - id = "hot_content" - size = "1g" - zone_count = 1 - autoscaling { - max_size = "8g" + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling 
= { + max_size = "8g" + } } - } - topology { - id = "ml" - size = "1g" - zone_count = 1 - autoscaling { - min_size = "1g" - max_size = "4g" + + "ml" = { + size = "1g" + zone_count = 1 + autoscaling = { + min_size = "1g" + max_size = "4g" + } } - } - topology { - id = "warm" - size = "2g" - zone_count = 1 - autoscaling { - max_size = "15g" + + "warm" = { + size = "2g" + zone_count = 1 + autoscaling = { + max_size = "15g" + } } } } diff --git a/ec/acc/testdata/deployment_autoscaling_2.tf b/ec/acc/testdata/deployment_autoscaling_2.tf index d3602c960..718e4015d 100644 --- a/ec/acc/testdata/deployment_autoscaling_2.tf +++ b/ec/acc/testdata/deployment_autoscaling_2.tf @@ -9,44 +9,45 @@ resource "ec_deployment" "autoscaling" { version = data.ec_stack.autoscaling.version deployment_template_id = "%s" - elasticsearch { + elasticsearch = { autoscale = "false" - topology { - id = "cold" - size = "0g" - zone_count = 1 - } + topology = { + "cold" = { + size = "0g" + zone_count = 1 + autoscaling = {} + } - topology { - id = "frozen" - size = "0g" - zone_count = 1 - } + "frozen" = { + size = "0g" + zone_count = 1 + autoscaling = {} + } - topology { - id = "hot_content" - size = "1g" - zone_count = 1 - autoscaling { - max_size = "8g" + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = { + max_size = "8g" + } } - } - topology { - id = "ml" - size = "0g" - zone_count = 1 - autoscaling { - min_size = "0g" - max_size = "4g" + + "ml" = { + size = "0g" + zone_count = 1 + autoscaling = { + min_size = "0g" + max_size = "4g" + } } - } - topology { - id = "warm" - size = "2g" - zone_count = 1 - autoscaling { - max_size = "15g" + + "warm" = { + size = "2g" + zone_count = 1 + autoscaling = { + max_size = "15g" + } } } } diff --git a/ec/acc/testdata/deployment_basic.tf b/ec/acc/testdata/deployment_basic.tf index 5ef30a6b4..7920976fe 100644 --- a/ec/acc/testdata/deployment_basic.tf +++ b/ec/acc/testdata/deployment_basic.tf @@ -10,28 +10,24 @@ resource "ec_deployment" "basic" { 
version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } - kibana { - topology { - instance_configuration_id = "%s" - } + kibana = { + instance_configuration_id = "%s" } - apm { - topology { - instance_configuration_id = "%s" - } + apm = { + instance_configuration_id = "%s" } - enterprise_search { - topology { - instance_configuration_id = "%s" - } + enterprise_search = { + instance_configuration_id = "%s" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_defaults_1.tf b/ec/acc/testdata/deployment_basic_defaults_1.tf index 35ac7d658..c8e451820 100644 --- a/ec/acc/testdata/deployment_basic_defaults_1.tf +++ b/ec/acc/testdata/deployment_basic_defaults_1.tf @@ -9,13 +9,18 @@ resource "ec_deployment" "defaults" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } - kibana {} - - enterprise_search { - topology { - zone_count = 1 } } -} \ No newline at end of file + + kibana = {} + + enterprise_search = { + zone_count = 1 + } +} diff --git a/ec/acc/testdata/deployment_basic_defaults_2.tf b/ec/acc/testdata/deployment_basic_defaults_2.tf index 430c283da..8ff1ab50e 100644 --- a/ec/acc/testdata/deployment_basic_defaults_2.tf +++ b/ec/acc/testdata/deployment_basic_defaults_2.tf @@ -9,23 +9,23 @@ resource "ec_deployment" "defaults" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} - - kibana { - topology { - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } } } - apm { - topology { - size = "1g" - } + kibana = { + size = "2g" } - enterprise_search { - topology { - zone_count = 1 - } + apm = { + size = "1g" } -} \ No newline at end of file + + enterprise_search = { + zone_count = 1 + 
} +} diff --git a/ec/acc/testdata/deployment_basic_defaults_3.tf b/ec/acc/testdata/deployment_basic_defaults_3.tf index 77e0eb480..bcbf9ae4b 100644 --- a/ec/acc/testdata/deployment_basic_defaults_3.tf +++ b/ec/acc/testdata/deployment_basic_defaults_3.tf @@ -9,12 +9,14 @@ resource "ec_deployment" "defaults" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } - kibana {} -} \ No newline at end of file + kibana = {} +} diff --git a/ec/acc/testdata/deployment_basic_defaults_hw_1.tf b/ec/acc/testdata/deployment_basic_defaults_hw_1.tf index 69095efd1..650fac4a7 100644 --- a/ec/acc/testdata/deployment_basic_defaults_hw_1.tf +++ b/ec/acc/testdata/deployment_basic_defaults_hw_1.tf @@ -9,10 +9,12 @@ resource "ec_deployment" "defaults" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_defaults_hw_2.tf b/ec/acc/testdata/deployment_basic_defaults_hw_2.tf index d3561ab25..5bd5d6cad 100644 --- a/ec/acc/testdata/deployment_basic_defaults_hw_2.tf +++ b/ec/acc/testdata/deployment_basic_defaults_hw_2.tf @@ -9,16 +9,16 @@ resource "ec_deployment" "defaults" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "4g" - } - topology { - id = "warm" - size = "4g" + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } + "warm" = { + autoscaling = {} + } } } - kibana {} + kibana = {} } diff --git a/ec/acc/testdata/deployment_basic_integrations_server_1.tf b/ec/acc/testdata/deployment_basic_integrations_server_1.tf index 87ee0c877..f99f85343 100644 --- 
a/ec/acc/testdata/deployment_basic_integrations_server_1.tf +++ b/ec/acc/testdata/deployment_basic_integrations_server_1.tf @@ -9,10 +9,16 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } + } + } - kibana {} + kibana = {} - integrations_server {} + integrations_server = {} } diff --git a/ec/acc/testdata/deployment_basic_integrations_server_2.tf b/ec/acc/testdata/deployment_basic_integrations_server_2.tf index 6c1e1d70b..e8fff7cb5 100644 --- a/ec/acc/testdata/deployment_basic_integrations_server_2.tf +++ b/ec/acc/testdata/deployment_basic_integrations_server_2.tf @@ -9,13 +9,17 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } + } + } - kibana {} + kibana = {} - integrations_server { - topology { - zone_count = 2 - } + integrations_server = { + zone_count = 2 } } diff --git a/ec/acc/testdata/deployment_basic_settings_config_1.tf b/ec/acc/testdata/deployment_basic_settings_config_1.tf index d54ed1440..3db5cdec6 100644 --- a/ec/acc/testdata/deployment_basic_settings_config_1.tf +++ b/ec/acc/testdata/deployment_basic_settings_config_1.tf @@ -9,28 +9,24 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } - kibana { - topology { - instance_configuration_id = "%s" - } + kibana = { + instance_configuration_id = "%s" } - apm { - topology { - instance_configuration_id = "%s" - } + apm = { + instance_configuration_id = "%s" } - enterprise_search { - topology { - instance_configuration_id = "%s" - } + enterprise_search = { + instance_configuration_id = "%s" } 
-} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_settings_config_2.tf b/ec/acc/testdata/deployment_basic_settings_config_2.tf index 9cfb4f994..cdb457544 100644 --- a/ec/acc/testdata/deployment_basic_settings_config_2.tf +++ b/ec/acc/testdata/deployment_basic_settings_config_2.tf @@ -9,41 +9,40 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - config { + elasticsearch = { + config = { user_settings_yaml = "action.auto_create_index: true" } - topology { - id = "hot_content" - size = "1g" + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } - kibana { - config { + kibana = { + config = { user_settings_yaml = "csp.warnLegacyBrowsers: true" } - topology { - instance_configuration_id = "%s" - } + + instance_configuration_id = "%s" } - apm { - config { + apm = { + config = { debug_enabled = true user_settings_json = jsonencode({ "apm-server.rum.enabled" = true }) } - topology { - instance_configuration_id = "%s" - } + + instance_configuration_id = "%s" } - enterprise_search { - config { + enterprise_search = { + config = { user_settings_yaml = "# comment" } - topology { - instance_configuration_id = "%s" - } + + instance_configuration_id = "%s" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_settings_config_import.tf b/ec/acc/testdata/deployment_basic_settings_config_import.tf new file mode 100644 index 000000000..ef77aaeae --- /dev/null +++ b/ec/acc/testdata/deployment_basic_settings_config_import.tf @@ -0,0 +1,58 @@ +data "ec_stack" "latest" { + version_regex = "latest" + region = "%s" +} + +resource "ec_deployment" "basic" { + name = "%s" + region = "%s" + version = data.ec_stack.latest.version + deployment_template_id = "%s" + + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } + + "warm" = { + autoscaling = {} + } + + "cold" = { + autoscaling = {} + } + + "frozen" = 
{ + autoscaling = {} + } + + "ml" = { + autoscaling = {} + } + + "master" = { + autoscaling = {} + } + + "coordinating" = { + autoscaling = {} + } + } + + config = {} + } + + kibana = { + instance_configuration_id = "%s" + } + + apm = { + instance_configuration_id = "%s" + } + + enterprise_search = { + instance_configuration_id = "%s" + } +} diff --git a/ec/acc/testdata/deployment_basic_tags_1.tf b/ec/acc/testdata/deployment_basic_tags_1.tf index 46e8e19a8..c0d4f9392 100644 --- a/ec/acc/testdata/deployment_basic_tags_1.tf +++ b/ec/acc/testdata/deployment_basic_tags_1.tf @@ -9,10 +9,12 @@ resource "ec_deployment" "tags" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } @@ -20,4 +22,4 @@ resource "ec_deployment" "tags" { owner = "elastic" cost-center = "rnd" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_tags_2.tf b/ec/acc/testdata/deployment_basic_tags_2.tf index 48b95175b..4d2ca4ff7 100644 --- a/ec/acc/testdata/deployment_basic_tags_2.tf +++ b/ec/acc/testdata/deployment_basic_tags_2.tf @@ -9,14 +9,16 @@ resource "ec_deployment" "tags" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } tags = { owner = "elastic" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_tags_3.tf b/ec/acc/testdata/deployment_basic_tags_3.tf index f46b5a7cd..e3be70428 100644 --- a/ec/acc/testdata/deployment_basic_tags_3.tf +++ b/ec/acc/testdata/deployment_basic_tags_3.tf @@ -9,10 +9,12 @@ resource "ec_deployment" "tags" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + 
elasticsearch = { + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_tags_4.tf b/ec/acc/testdata/deployment_basic_tags_4.tf index 9edfd4503..f6336ac8f 100644 --- a/ec/acc/testdata/deployment_basic_tags_4.tf +++ b/ec/acc/testdata/deployment_basic_tags_4.tf @@ -9,14 +9,16 @@ resource "ec_deployment" "tags" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } tags = { new = "tag" } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf b/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf index b397de690..294f9798f 100644 --- a/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf +++ b/ec/acc/testdata/deployment_basic_with_traffic_filter_2.tf @@ -9,18 +9,20 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } - kibana {} + kibana = {} - apm {} + apm = {} - enterprise_search {} + enterprise_search = {} traffic_filter = [ ec_deployment_traffic_filter.default.id, diff --git a/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf b/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf index 1e975039e..dde629a7f 100644 --- a/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf +++ b/ec/acc/testdata/deployment_basic_with_traffic_filter_3.tf @@ -9,18 +9,20 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = 
{} + } } } - kibana {} + kibana = {} - apm {} + apm = {} - enterprise_search {} + enterprise_search = {} traffic_filter = [ ec_deployment_traffic_filter.second.id, diff --git a/ec/acc/testdata/deployment_ccs_1.tf b/ec/acc/testdata/deployment_ccs_1.tf index bfa969f70..846dc1e62 100644 --- a/ec/acc/testdata/deployment_ccs_1.tf +++ b/ec/acc/testdata/deployment_ccs_1.tf @@ -9,14 +9,19 @@ resource "ec_deployment" "ccs" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - dynamic "remote_cluster" { - for_each = ec_deployment.source_ccs - content { - deployment_id = remote_cluster.value.id - alias = remote_cluster.value.name + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} } } + + "remote_cluster" = [for source_css in ec_deployment.source_ccs : + { + deployment_id = source_css.id + alias = source_css.name + } + ] } } @@ -27,11 +32,13 @@ resource "ec_deployment" "source_ccs" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - zone_count = 1 - size = "1g" + elasticsearch = { + topology = { + "hot_content" = { + zone_count = 1 + size = "1g" + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_ccs_2.tf b/ec/acc/testdata/deployment_ccs_2.tf index 7d385694d..821097202 100644 --- a/ec/acc/testdata/deployment_ccs_2.tf +++ b/ec/acc/testdata/deployment_ccs_2.tf @@ -9,12 +9,14 @@ resource "ec_deployment" "ccs" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } - kibana {} -} \ No newline at end of file + kibana = {} +} diff --git a/ec/acc/testdata/deployment_compute_optimized_1.tf b/ec/acc/testdata/deployment_compute_optimized_1.tf index 0512241d1..5e0124696 100644 --- a/ec/acc/testdata/deployment_compute_optimized_1.tf +++ 
b/ec/acc/testdata/deployment_compute_optimized_1.tf @@ -9,7 +9,13 @@ resource "ec_deployment" "compute_optimized" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } + } + } - kibana {} -} \ No newline at end of file + kibana = {} +} diff --git a/ec/acc/testdata/deployment_compute_optimized_2.tf b/ec/acc/testdata/deployment_compute_optimized_2.tf index ab5c27138..d72704871 100644 --- a/ec/acc/testdata/deployment_compute_optimized_2.tf +++ b/ec/acc/testdata/deployment_compute_optimized_2.tf @@ -9,14 +9,16 @@ resource "ec_deployment" "compute_optimized" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } - kibana {} + kibana = {} - apm {} -} \ No newline at end of file + apm = {} +} diff --git a/ec/acc/testdata/deployment_dedicated_coordinating.tf b/ec/acc/testdata/deployment_dedicated_coordinating.tf index 4f47a80d8..a9aa4af2e 100644 --- a/ec/acc/testdata/deployment_dedicated_coordinating.tf +++ b/ec/acc/testdata/deployment_dedicated_coordinating.tf @@ -9,21 +9,25 @@ resource "ec_deployment" "dedicated_coordinating" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "coordinating" - zone_count = 2 - size = "1g" - } - topology { - id = "hot_content" - zone_count = 1 - size = "1g" - } - topology { - id = "warm" - zone_count = 1 - size = "2g" + elasticsearch = { + topology = { + "coordinating" = { + zone_count = 2 + size = "1g" + autoscaling = {} + } + + "hot_content" = { + zone_count = 1 + size = "1g" + autoscaling = {} + } + + "warm" = { + zone_count = 1 + size = "2g" + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_dedicated_master.tf 
b/ec/acc/testdata/deployment_dedicated_master.tf index 4ec815812..6c29f0452 100644 --- a/ec/acc/testdata/deployment_dedicated_master.tf +++ b/ec/acc/testdata/deployment_dedicated_master.tf @@ -9,26 +9,31 @@ resource "ec_deployment" "dedicated_master" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "cold" - zone_count = 1 - size = "2g" - } - topology { - id = "hot_content" - zone_count = 3 - size = "1g" - } - topology { - id = "master" - zone_count = 3 - size = "1g" - } - topology { - id = "warm" - zone_count = 2 - size = "2g" + elasticsearch = { + topology = { + "cold" = { + zone_count = 1 + size = "2g" + autoscaling = {} + } + + "hot_content" = { + zone_count = 3 + size = "1g" + autoscaling = {} + } + + "master" = { + zone_count = 3 + size = "1g" + autoscaling = {} + } + + "warm" = { + zone_count = 2 + size = "2g" + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_docker_image_override.tf b/ec/acc/testdata/deployment_docker_image_override.tf index c0ad75f96..96f0a76d0 100644 --- a/ec/acc/testdata/deployment_docker_image_override.tf +++ b/ec/acc/testdata/deployment_docker_image_override.tf @@ -15,35 +15,37 @@ resource "ec_deployment" "docker_image" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - config { + elasticsearch = { + config = { docker_image = "docker.elastic.co/cloud-ci/elasticsearch:7.15.0-SNAPSHOT" } - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } - kibana { - config { + kibana = { + config = { docker_image = "docker.elastic.co/cloud-ci/kibana:7.15.0-SNAPSHOT" } } - apm { - config { + apm = { + config = { docker_image = "docker.elastic.co/cloud-ci/apm:7.15.0-SNAPSHOT" } } - enterprise_search { - config { + enterprise_search = { + config = { docker_image = 
"docker.elastic.co/cloud-ci/enterprise-search:7.15.0-SNAPSHOT" } - topology { - zone_count = 1 - } + + zone_count = 1 } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf index 094e23a3b..5733d1f3a 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_1.tf @@ -9,11 +9,13 @@ resource "ec_deployment" "keystore" { version = data.ec_stack.keystore.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_1_041.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_1_041.tf new file mode 100644 index 000000000..094e23a3b --- /dev/null +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_1_041.tf @@ -0,0 +1,32 @@ +data "ec_stack" "keystore" { + version_regex = "latest" + region = "%s" +} + +resource "ec_deployment" "keystore" { + name = "%s" + region = "%s" + version = data.ec_stack.keystore.version + deployment_template_id = "%s" + + elasticsearch { + topology { + id = "hot_content" + size = "1g" + zone_count = 1 + } + } +} + +resource "ec_deployment_elasticsearch_keystore" "test" { + deployment_id = ec_deployment.keystore.id + setting_name = "xpack.notification.slack.account.hello.secure_url" + value = "hella" +} + +resource "ec_deployment_elasticsearch_keystore" "gcs_creds" { + deployment_id = ec_deployment.keystore.id + setting_name = "gcs.client.secondary.credentials_file" + value = file("testdata/deployment_elasticsearch_keystore_creds.json") +} + diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_1_migrated.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_1_migrated.tf new file mode 100644 index 000000000..5733d1f3a --- /dev/null +++ 
b/ec/acc/testdata/deployment_elasticsearch_keystore_1_migrated.tf @@ -0,0 +1,34 @@ +data "ec_stack" "keystore" { + version_regex = "latest" + region = "%s" +} + +resource "ec_deployment" "keystore" { + name = "%s" + region = "%s" + version = data.ec_stack.keystore.version + deployment_template_id = "%s" + + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } + } + } +} + +resource "ec_deployment_elasticsearch_keystore" "test" { + deployment_id = ec_deployment.keystore.id + setting_name = "xpack.notification.slack.account.hello.secure_url" + value = "hella" +} + +resource "ec_deployment_elasticsearch_keystore" "gcs_creds" { + deployment_id = ec_deployment.keystore.id + setting_name = "gcs.client.secondary.credentials_file" + value = file("testdata/deployment_elasticsearch_keystore_creds.json") +} + diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf index 4a04a7c2f..d9a2ac720 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_2.tf @@ -9,11 +9,13 @@ resource "ec_deployment" "keystore" { version = data.ec_stack.keystore.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf index 572e49b21..7cabe7f26 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_3.tf @@ -9,11 +9,13 @@ resource "ec_deployment" "keystore" { version = data.ec_stack.keystore.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + 
"hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf b/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf index 85c3f2fb4..670e41b1c 100644 --- a/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf +++ b/ec/acc/testdata/deployment_elasticsearch_keystore_4.tf @@ -9,11 +9,13 @@ resource "ec_deployment" "keystore" { version = data.ec_stack.keystore.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_emptyconfig.tf b/ec/acc/testdata/deployment_emptyconfig.tf index c51f87ca8..ce2647acd 100644 --- a/ec/acc/testdata/deployment_emptyconfig.tf +++ b/ec/acc/testdata/deployment_emptyconfig.tf @@ -9,14 +9,16 @@ resource "ec_deployment" "emptyconfig" { version = data.ec_stack.emptyconfig.version deployment_template_id = "%s" - elasticsearch { - config { + elasticsearch = { + config = { user_settings_yaml = null } - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_enterprise_search_1.tf b/ec/acc/testdata/deployment_enterprise_search_1.tf index 1490e541a..0797989b9 100644 --- a/ec/acc/testdata/deployment_enterprise_search_1.tf +++ b/ec/acc/testdata/deployment_enterprise_search_1.tf @@ -9,9 +9,15 @@ resource "ec_deployment" "enterprise_search" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } + } + } - kibana {} + kibana = {} - enterprise_search {} -} \ No newline at end of file + enterprise_search = {} +} diff --git a/ec/acc/testdata/deployment_enterprise_search_2.tf 
b/ec/acc/testdata/deployment_enterprise_search_2.tf index 29eeb4f24..5afc2f3e8 100644 --- a/ec/acc/testdata/deployment_enterprise_search_2.tf +++ b/ec/acc/testdata/deployment_enterprise_search_2.tf @@ -9,14 +9,16 @@ resource "ec_deployment" "enterprise_search" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } - kibana {} + kibana = {} - enterprise_search {} -} \ No newline at end of file + enterprise_search = {} +} diff --git a/ec/acc/testdata/deployment_hotwarm_1.tf b/ec/acc/testdata/deployment_hotwarm_1.tf index 8e693f913..e1f20ca6b 100644 --- a/ec/acc/testdata/deployment_hotwarm_1.tf +++ b/ec/acc/testdata/deployment_hotwarm_1.tf @@ -9,5 +9,15 @@ resource "ec_deployment" "hotwarm" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} -} \ No newline at end of file + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } + + "warm" = { + autoscaling = {} + } + } + } +} diff --git a/ec/acc/testdata/deployment_hotwarm_2.tf b/ec/acc/testdata/deployment_hotwarm_2.tf index 03ca44de1..d53fdda74 100644 --- a/ec/acc/testdata/deployment_hotwarm_2.tf +++ b/ec/acc/testdata/deployment_hotwarm_2.tf @@ -9,16 +9,19 @@ resource "ec_deployment" "hotwarm" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - zone_count = 1 - size = "1g" - } - topology { - id = "warm" - zone_count = 1 - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + zone_count = 1 + size = "1g" + autoscaling = {} + } + + "warm" = { + zone_count = 1 + size = "2g" + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_memory_optimized_1.tf b/ec/acc/testdata/deployment_memory_optimized_1.tf index dd7421c27..05445f84e 100644 --- 
a/ec/acc/testdata/deployment_memory_optimized_1.tf +++ b/ec/acc/testdata/deployment_memory_optimized_1.tf @@ -9,7 +9,13 @@ resource "ec_deployment" "memory_optimized" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } + } + } - kibana {} -} \ No newline at end of file + kibana = {} +} diff --git a/ec/acc/testdata/deployment_memory_optimized_2.tf b/ec/acc/testdata/deployment_memory_optimized_2.tf index ac762c066..d547b04a1 100644 --- a/ec/acc/testdata/deployment_memory_optimized_2.tf +++ b/ec/acc/testdata/deployment_memory_optimized_2.tf @@ -9,14 +9,16 @@ resource "ec_deployment" "memory_optimized" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } - kibana {} + kibana = {} - apm {} -} \ No newline at end of file + apm = {} +} diff --git a/ec/acc/testdata/deployment_observability_1.tf b/ec/acc/testdata/deployment_observability_1.tf index 6811f6eb0..48ccb6663 100644 --- a/ec/acc/testdata/deployment_observability_1.tf +++ b/ec/acc/testdata/deployment_observability_1.tf @@ -9,11 +9,13 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -24,15 +26,17 @@ resource "ec_deployment" "observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } - observability { + observability = { deployment_id = 
ec_deployment.basic.id } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_2.tf b/ec/acc/testdata/deployment_observability_2.tf index 709a77949..79e2dffa6 100644 --- a/ec/acc/testdata/deployment_observability_2.tf +++ b/ec/acc/testdata/deployment_observability_2.tf @@ -9,11 +9,13 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -24,16 +26,18 @@ resource "ec_deployment" "observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } - observability { + observability = { deployment_id = ec_deployment.basic.id metrics = false } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_3.tf b/ec/acc/testdata/deployment_observability_3.tf index 976d6a545..0bf364b50 100644 --- a/ec/acc/testdata/deployment_observability_3.tf +++ b/ec/acc/testdata/deployment_observability_3.tf @@ -9,11 +9,13 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -24,16 +26,18 @@ resource "ec_deployment" "observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } - observability { + 
observability = { deployment_id = ec_deployment.basic.id logs = false } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_4.tf b/ec/acc/testdata/deployment_observability_4.tf index 7ee57ac9e..244dca5f6 100644 --- a/ec/acc/testdata/deployment_observability_4.tf +++ b/ec/acc/testdata/deployment_observability_4.tf @@ -9,11 +9,13 @@ resource "ec_deployment" "basic" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } } @@ -24,11 +26,13 @@ resource "ec_deployment" "observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_self.tf b/ec/acc/testdata/deployment_observability_self.tf index 7ab3d8df4..de9152b04 100644 --- a/ec/acc/testdata/deployment_observability_self.tf +++ b/ec/acc/testdata/deployment_observability_self.tf @@ -9,24 +9,24 @@ resource "ec_deployment" "observability" { version = data.ec_stack.latest.version deployment_template_id = "%s" - observability { + observability = { deployment_id = "self" } - elasticsearch { + elasticsearch = { autoscale = "false" - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } - kibana { - topology { - size = "1g" - zone_count = 1 - } + kibana = { + size = "1g" + zone_count = 1 } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_observability_tpl_1.tf b/ec/acc/testdata/deployment_observability_tpl_1.tf index 4b475d259..c770687a3 100644 --- 
a/ec/acc/testdata/deployment_observability_tpl_1.tf +++ b/ec/acc/testdata/deployment_observability_tpl_1.tf @@ -9,9 +9,15 @@ resource "ec_deployment" "observability_tpl" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } + } + } - kibana {} + kibana = {} - apm {} -} \ No newline at end of file + apm = {} +} diff --git a/ec/acc/testdata/deployment_observability_tpl_2.tf b/ec/acc/testdata/deployment_observability_tpl_2.tf index a3a87acbf..1c7b46e81 100644 --- a/ec/acc/testdata/deployment_observability_tpl_2.tf +++ b/ec/acc/testdata/deployment_observability_tpl_2.tf @@ -9,14 +9,16 @@ resource "ec_deployment" "observability_tpl" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } - kibana {} + kibana = {} - apm {} -} \ No newline at end of file + apm = {} +} diff --git a/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf b/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf index 2b99eff5f..b1b1d1573 100644 --- a/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf +++ b/ec/acc/testdata/deployment_post_node_roles_upgrade_1.tf @@ -9,11 +9,13 @@ resource "ec_deployment" "post_nr_upgrade" { version = data.ec_stack.post_node_roles_upgrade.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf b/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf index 98a710d17..67685532a 100644 --- a/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf +++ 
b/ec/acc/testdata/deployment_post_node_roles_upgrade_2.tf @@ -9,11 +9,13 @@ resource "ec_deployment" "post_nr_upgrade" { version = data.ec_stack.post_node_roles_upgrade.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf index 87619cb96..797c560b4 100644 --- a/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_1.tf @@ -1,5 +1,5 @@ data "ec_stack" "pre_node_roles" { - version_regex = "7.9.?" + version_regex = "^7\\.9\\.\\d{1,2}$" region = "%s" } @@ -9,11 +9,13 @@ resource "ec_deployment" "pre_nr" { version = data.ec_stack.pre_node_roles.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf index 2d96cbf1f..f958fc44f 100644 --- a/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_2.tf @@ -1,5 +1,5 @@ data "ec_stack" "pre_node_roles" { - version_regex = "7.??.?" 
+ version_regex = "^7\\.\\d{1,2}\\.\\d{1,2}$" region = "%s" } @@ -9,11 +9,13 @@ resource "ec_deployment" "pre_nr" { version = data.ec_stack.pre_node_roles.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf b/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf index a8729b10e..c46eb113b 100644 --- a/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf +++ b/ec/acc/testdata/deployment_pre_node_roles_migration_3.tf @@ -1,5 +1,5 @@ data "ec_stack" "pre_node_roles" { - version_regex = "7.??.?" + version_regex = "^7\\.\\d{1,2}\\.\\d{1,2}$" region = "%s" } @@ -9,16 +9,19 @@ resource "ec_deployment" "pre_nr" { version = data.ec_stack.pre_node_roles.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 - } - topology { - id = "warm" - size = "2g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } + + "warm" = { + size = "2g" + zone_count = 1 + autoscaling = {} + } } } -} \ No newline at end of file +} diff --git a/ec/acc/testdata/deployment_security_1.tf b/ec/acc/testdata/deployment_security_1.tf index 89140e5f6..fc9c94ca5 100644 --- a/ec/acc/testdata/deployment_security_1.tf +++ b/ec/acc/testdata/deployment_security_1.tf @@ -9,7 +9,13 @@ resource "ec_deployment" "security" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch {} + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } + } + } - kibana {} -} \ No newline at end of file + kibana = {} +} diff --git a/ec/acc/testdata/deployment_security_2.tf b/ec/acc/testdata/deployment_security_2.tf index 6fa8777df..3a71e020b 100644 --- 
a/ec/acc/testdata/deployment_security_2.tf +++ b/ec/acc/testdata/deployment_security_2.tf @@ -9,14 +9,16 @@ resource "ec_deployment" "security" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + size = "2g" + autoscaling = {} + } } } - kibana {} + kibana = {} - apm {} -} \ No newline at end of file + apm = {} +} diff --git a/ec/acc/testdata/deployment_snapshot_1.tf b/ec/acc/testdata/deployment_snapshot_1.tf index 62500342c..07e035a2f 100644 --- a/ec/acc/testdata/deployment_snapshot_1.tf +++ b/ec/acc/testdata/deployment_snapshot_1.tf @@ -14,10 +14,12 @@ resource "ec_deployment" "snapshot_source" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } } diff --git a/ec/acc/testdata/deployment_snapshot_2.tf b/ec/acc/testdata/deployment_snapshot_2.tf index 08c374005..7f41cd18b 100644 --- a/ec/acc/testdata/deployment_snapshot_2.tf +++ b/ec/acc/testdata/deployment_snapshot_2.tf @@ -14,10 +14,12 @@ resource "ec_deployment" "snapshot_source" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } } @@ -28,14 +30,17 @@ resource "ec_deployment" "snapshot_target" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - snapshot_source { + elasticsearch = { + + snapshot_source = [{ source_elasticsearch_cluster_id = ec_deployment.snapshot_source.elasticsearch.0.resource_id - } + }] - topology { - id = "hot_content" - size = "1g" + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} 
+ } } } } diff --git a/ec/acc/testdata/deployment_traffic_filter_association_basic.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic.tf index 5a5186876..80ffb1c60 100644 --- a/ec/acc/testdata/deployment_traffic_filter_association_basic.tf +++ b/ec/acc/testdata/deployment_traffic_filter_association_basic.tf @@ -9,14 +9,16 @@ resource "ec_deployment" "tf_assoc" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } - kibana {} + kibana = {} } resource "ec_deployment_traffic_filter" "tf_assoc" { diff --git a/ec/acc/testdata/deployment_traffic_filter_association_basic_041.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic_041.tf new file mode 100644 index 000000000..5a5186876 --- /dev/null +++ b/ec/acc/testdata/deployment_traffic_filter_association_basic_041.tf @@ -0,0 +1,35 @@ +data "ec_stack" "latest" { + version_regex = "latest" + region = "%s" +} + +resource "ec_deployment" "tf_assoc" { + name = "%s" + region = "%s" + version = data.ec_stack.latest.version + deployment_template_id = "%s" + + elasticsearch { + topology { + id = "hot_content" + size = "1g" + } + } + + kibana {} +} + +resource "ec_deployment_traffic_filter" "tf_assoc" { + name = "%s" + region = "%s" + type = "ip" + + rule { + source = "0.0.0.0/0" + } +} + +resource "ec_deployment_traffic_filter_association" "tf_assoc" { + traffic_filter_id = ec_deployment_traffic_filter.tf_assoc.id + deployment_id = ec_deployment.tf_assoc.id +} diff --git a/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf new file mode 100644 index 000000000..0e4cbc876 --- /dev/null +++ b/ec/acc/testdata/deployment_traffic_filter_association_basic_ignore_changes.tf @@ -0,0 +1,41 @@ +data "ec_stack" "latest" { + version_regex 
= "latest" + region = "%s" +} + +resource "ec_deployment" "tf_assoc" { + name = "%s" + region = "%s" + version = data.ec_stack.latest.version + deployment_template_id = "%s" + + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } + } + } + + kibana = {} + + lifecycle { + ignore_changes = [traffic_filter] + } +} + +resource "ec_deployment_traffic_filter" "tf_assoc" { + name = "%s" + region = "%s" + type = "ip" + + rule { + source = "0.0.0.0/0" + } +} + +resource "ec_deployment_traffic_filter_association" "tf_assoc" { + traffic_filter_id = ec_deployment_traffic_filter.tf_assoc.id + deployment_id = ec_deployment.tf_assoc.id +} diff --git a/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf b/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf index 212ff8d66..3ffd74f67 100644 --- a/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf +++ b/ec/acc/testdata/deployment_traffic_filter_association_basic_update.tf @@ -9,14 +9,16 @@ resource "ec_deployment" "tf_assoc" { version = data.ec_stack.latest.version deployment_template_id = "%s" - elasticsearch { - topology { - id = "hot_content" - size = "1g" + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + autoscaling = {} + } } } - kibana {} + kibana = {} } resource "ec_deployment_traffic_filter" "tf_assoc_second" { diff --git a/ec/acc/testdata/deployment_upgrade_retry_1.tf b/ec/acc/testdata/deployment_upgrade_retry_1.tf index bde4282b4..6ab4ec207 100644 --- a/ec/acc/testdata/deployment_upgrade_retry_1.tf +++ b/ec/acc/testdata/deployment_upgrade_retry_1.tf @@ -14,13 +14,15 @@ resource "ec_deployment" "upgrade_retry" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } - kibana {} + 
kibana = {} } diff --git a/ec/acc/testdata/deployment_upgrade_retry_2.tf b/ec/acc/testdata/deployment_upgrade_retry_2.tf index d45d1ce9e..a9913008c 100644 --- a/ec/acc/testdata/deployment_upgrade_retry_2.tf +++ b/ec/acc/testdata/deployment_upgrade_retry_2.tf @@ -14,13 +14,15 @@ resource "ec_deployment" "upgrade_retry" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - topology { - id = "hot_content" - size = "1g" - zone_count = 1 + elasticsearch = { + topology = { + "hot_content" = { + size = "1g" + zone_count = 1 + autoscaling = {} + } } } - kibana {} + kibana = {} } diff --git a/ec/acc/testdata/deployment_with_extension_bundle_file.tf b/ec/acc/testdata/deployment_with_extension_bundle_file.tf index e44aa9d06..6e83c3b77 100644 --- a/ec/acc/testdata/deployment_with_extension_bundle_file.tf +++ b/ec/acc/testdata/deployment_with_extension_bundle_file.tf @@ -6,7 +6,6 @@ locals { file_path = "%s" } - data "ec_stack" "latest" { version_regex = "latest" region = local.region @@ -18,13 +17,18 @@ resource "ec_deployment" "with_extension" { version = data.ec_stack.latest.version deployment_template_id = local.deployment_template - elasticsearch { - extension { + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } + } + extension = [{ type = "bundle" name = local.name version = data.ec_stack.latest.version url = ec_deployment_extension.my_extension.url - } + }] } } diff --git a/ec/ecdatasource/deploymentdatasource/datasource.go b/ec/ecdatasource/deploymentdatasource/datasource.go index 54ddd549f..afcee8670 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource.go +++ b/ec/ecdatasource/deploymentdatasource/datasource.go @@ -19,39 +19,59 @@ package deploymentdatasource import ( "context" - "time" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" 
"github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deputil" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/terraform-provider-ec/ec/internal" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) -// DataSource returns the ec_deployment data source schema. -func DataSource() *schema.Resource { - return &schema.Resource{ - ReadContext: read, +var _ datasource.DataSource = &DataSource{} +var _ datasource.DataSourceWithConfigure = &DataSource{} - Schema: newSchema(), +type DataSource struct { + client *api.API +} - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(5 * time.Minute), - }, - } +func (d *DataSource) Configure(ctx context.Context, request datasource.ConfigureRequest, response *datasource.ConfigureResponse) { + client, diags := internal.ConvertProviderData(request.ProviderData) + response.Diagnostics.Append(diags...) + d.client = client } -func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) - deploymentID := d.Get("id").(string) +func (d *DataSource) Metadata(ctx context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { + response.TypeName = request.ProviderTypeName + "_deployment" +} + +func (d DataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + // Prevent panic if the provider has not been configured. + if d.client == nil { + response.Diagnostics.AddError( + "Unconfigured API Client", + "Expected configured API client. 
Please report this issue to the provider developers.", + ) + + return + } + + var newState modelV0 + response.Diagnostics.Append(request.Config.Get(ctx, &newState)...) + if response.Diagnostics.HasError() { + return + } res, err := deploymentapi.Get(deploymentapi.GetParams{ - API: client, - DeploymentID: deploymentID, + API: d.client, + DeploymentID: newState.ID.Value, QueryParams: deputil.QueryParams{ ShowPlans: true, ShowSettings: true, @@ -60,92 +80,64 @@ func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diag }, }) if err != nil { - return diag.FromErr( - multierror.NewPrefixed("failed retrieving deployment information", err), + response.Diagnostics.AddError( + "Failed retrieving deployment information", + fmt.Sprintf("Failed retrieving deployment information: %s", err), ) + return } - d.SetId(deploymentID) - - if err := modelToState(d, res); err != nil { - return diag.FromErr(err) + response.Diagnostics.Append(modelToState(ctx, res, &newState)...) + if response.Diagnostics.HasError() { + return } - return nil + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) 
} -func modelToState(d *schema.ResourceData, res *models.DeploymentGetResponse) error { - if err := d.Set("name", res.Name); err != nil { - return err - } - - if err := d.Set("healthy", res.Healthy); err != nil { - return err - } +func modelToState(ctx context.Context, res *models.DeploymentGetResponse, state *modelV0) diag.Diagnostics { + var diagsnostics diag.Diagnostics - if err := d.Set("alias", res.Alias); err != nil { - return err - } + state.Name = types.String{Value: *res.Name} + state.Healthy = types.Bool{Value: *res.Healthy} + state.Alias = types.String{Value: res.Alias} es := res.Resources.Elasticsearch[0] if es.Region != nil { - if err := d.Set("region", *es.Region); err != nil { - return err - } + state.Region = types.String{Value: *es.Region} } if !util.IsCurrentEsPlanEmpty(es) { - if err := d.Set("deployment_template_id", - *es.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID); err != nil { - return err - } + state.DeploymentTemplateID = types.String{Value: *es.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID} } - if settings := flattenTrafficFiltering(res.Settings); settings != nil { - if err := d.Set("traffic_filter", settings); err != nil { - return err - } - } + var diags diag.Diagnostics - if observability := flattenObservability(res.Settings); len(observability) > 0 { - if err := d.Set("observability", observability); err != nil { - return err - } - } + state.TrafficFilter, diags = flattenTrafficFiltering(ctx, res.Settings) + diagsnostics.Append(diags...) - elasticsearchFlattened, err := flattenElasticsearchResources(res.Resources.Elasticsearch) - if err != nil { - return err - } - if err := d.Set("elasticsearch", elasticsearchFlattened); err != nil { - return err - } + state.Observability, diags = flattenObservability(ctx, res.Settings) + diagsnostics.Append(diags...) 
- kibanaFlattened := flattenKibanaResources(res.Resources.Kibana) - if err := d.Set("kibana", kibanaFlattened); err != nil { - return err - } + state.Elasticsearch, diags = flattenElasticsearchResources(ctx, res.Resources.Elasticsearch) + diagsnostics.Append(diags...) - apmFlattened := flattenApmResources(res.Resources.Apm) - if err := d.Set("apm", apmFlattened); err != nil { - return err - } + state.Kibana, diags = flattenKibanaResources(ctx, res.Resources.Kibana) + diagsnostics.Append(diags...) - integrationsServerFlattened := flattenIntegrationsServerResources(res.Resources.IntegrationsServer) - if err := d.Set("integrations_server", integrationsServerFlattened); err != nil { - return err - } + state.Apm, diags = flattenApmResources(ctx, res.Resources.Apm) + diagsnostics.Append(diags...) - enterpriseSearchFlattened := flattenEnterpriseSearchResources(res.Resources.EnterpriseSearch) - if err := d.Set("enterprise_search", enterpriseSearchFlattened); err != nil { - return err - } + state.IntegrationsServer, diags = flattenIntegrationsServerResources(ctx, res.Resources.IntegrationsServer) + diagsnostics.Append(diags...) + + state.EnterpriseSearch, diags = flattenEnterpriseSearchResources(ctx, res.Resources.EnterpriseSearch) + diagsnostics.Append(diags...) 
- if tagsFlattened := flattenTags(res.Metadata); tagsFlattened != nil { - if err := d.Set("tags", tagsFlattened); err != nil { - return err - } + if res.Metadata != nil { + state.Tags = converters.ModelsTagsToTypesMap(res.Metadata.Tags) } - return nil + return diagsnostics } diff --git a/ec/ecdatasource/deploymentdatasource/datasource_test.go b/ec/ecdatasource/deploymentdatasource/datasource_test.go index 74fed579c..12ea29998 100644 --- a/ec/ecdatasource/deploymentdatasource/datasource_test.go +++ b/ec/ecdatasource/deploymentdatasource/datasource_test.go @@ -18,42 +18,36 @@ package deploymentdatasource import ( + "context" "testing" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_modelToState(t *testing.T) { - deploymentSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - deploymentSchemaArg.SetId(mock.ValidClusterID) - - wantDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeployment(), - Schema: newSchema(), - }) - + wantDeployment := newSampleDeployment() type args struct { - d *schema.ResourceData res *models.DeploymentGetResponse } tests := []struct { name string args args - want *schema.ResourceData + want modelV0 err error }{ { name: "flattens deployment resources", want: wantDeployment, args: args{ - d: deploymentSchemaArg, res: &models.DeploymentGetResponse{ Alias: "some-alias", ID: &mock.ValidClusterID, @@ -135,54 +129,155 @@ func Test_modelToState(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := modelToState(tt.args.d, tt.args.res) + 
model := modelV0{ + ID: types.String{Value: mock.ValidClusterID}, + } + diags := modelToState(context.Background(), tt.args.res, &model) if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) + assert.Equal(t, diags, tt.err) } else { - assert.NoError(t, err) + assert.Empty(t, diags) } - assert.Equal(t, tt.want.State().Attributes, tt.args.d.State().Attributes) + assert.Equal(t, tt.want, model) }) } } -func newSampleDeployment() map[string]interface{} { - return map[string]interface{}{ - "id": mock.ValidClusterID, - "name": "my_deployment_name", - "alias": "some-alias", - "deployment_template_id": "aws-io-optimized", - "healthy": true, - "region": "us-east-1", - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - "observability": []interface{}{newObservabilitySample()}, - "elasticsearch": []interface{}{map[string]interface{}{ - "healthy": true, - }}, - "kibana": []interface{}{map[string]interface{}{ - "healthy": true, - }}, - "apm": []interface{}{map[string]interface{}{ - "healthy": true, - }}, - "integrations_server": []interface{}{map[string]interface{}{ - "healthy": true, - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "healthy": true, - }}, - "tags": map[string]interface{}{ - "foo": "bar", +func newSampleDeployment() modelV0 { + return modelV0{ + ID: types.String{Value: mock.ValidClusterID}, + Name: types.String{Value: "my_deployment_name"}, + Alias: types.String{Value: "some-alias"}, + DeploymentTemplateID: types.String{Value: "aws-io-optimized"}, + Healthy: types.Bool{Value: true}, + Region: types.String{Value: "us-east-1"}, + TrafficFilter: util.StringListAsType([]string{"0.0.0.0/0", "192.168.10.0/24"}), + Observability: types.List{ + ElemType: types.ObjectType{AttrTypes: observabilitySettingsAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: observabilitySettingsAttrTypes(), + Attrs: map[string]attr.Value{ + "deployment_id": types.String{Value: mock.ValidClusterID}, + "ref_id": types.String{Value: 
"main-elasticsearch"}, + "logs": types.Bool{Value: true}, + "metrics": types.Bool{Value: true}, + }, + }, + }, }, - } -} - -func newObservabilitySample() map[string]interface{} { - return map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, - "metrics": true, + Elasticsearch: types.List{ + ElemType: types.ObjectType{AttrTypes: elasticsearchResourceInfoAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: elasticsearchResourceInfoAttrTypes(), + Attrs: map[string]attr.Value{ + "cloud_id": types.String{Value: ""}, + "healthy": types.Bool{Value: true}, + "autoscale": types.String{Value: ""}, + "http_endpoint": types.String{Value: ""}, + "https_endpoint": types.String{Value: ""}, + "ref_id": types.String{Value: ""}, + "resource_id": types.String{Value: ""}, + "status": types.String{Value: ""}, + "version": types.String{Value: ""}, + "topology": types.List{ + ElemType: types.ObjectType{AttrTypes: elasticsearchTopologyAttrTypes()}, + Elems: []attr.Value{}, + }, + }, + }, + }, + }, + Kibana: types.List{ + ElemType: types.ObjectType{AttrTypes: kibanaResourceInfoAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: kibanaResourceInfoAttrTypes(), + Attrs: map[string]attr.Value{ + "elasticsearch_cluster_ref_id": types.String{Value: ""}, + "healthy": types.Bool{Value: true}, + "http_endpoint": types.String{Value: ""}, + "https_endpoint": types.String{Value: ""}, + "ref_id": types.String{Value: ""}, + "resource_id": types.String{Value: ""}, + "status": types.String{Value: ""}, + "version": types.String{Value: ""}, + "topology": types.List{ + ElemType: types.ObjectType{AttrTypes: kibanaTopologyAttrTypes()}, + Elems: []attr.Value{}, + }, + }, + }, + }, + }, + Apm: types.List{ + ElemType: types.ObjectType{AttrTypes: apmResourceInfoAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: apmResourceInfoAttrTypes(), + Attrs: map[string]attr.Value{ + "elasticsearch_cluster_ref_id": 
types.String{Value: ""}, + "healthy": types.Bool{Value: true}, + "http_endpoint": types.String{Value: ""}, + "https_endpoint": types.String{Value: ""}, + "ref_id": types.String{Value: ""}, + "resource_id": types.String{Value: ""}, + "status": types.String{Value: ""}, + "version": types.String{Value: ""}, + "topology": types.List{ + ElemType: types.ObjectType{AttrTypes: apmTopologyAttrTypes()}, + Elems: []attr.Value{}, + }, + }, + }, + }, + }, + IntegrationsServer: types.List{ + ElemType: types.ObjectType{AttrTypes: integrationsServerResourceInfoAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: integrationsServerResourceInfoAttrTypes(), + Attrs: map[string]attr.Value{ + "elasticsearch_cluster_ref_id": types.String{Value: ""}, + "healthy": types.Bool{Value: true}, + "http_endpoint": types.String{Value: ""}, + "https_endpoint": types.String{Value: ""}, + "ref_id": types.String{Value: ""}, + "resource_id": types.String{Value: ""}, + "status": types.String{Value: ""}, + "version": types.String{Value: ""}, + "topology": types.List{ + ElemType: types.ObjectType{AttrTypes: integrationsServerTopologyAttrTypes()}, + Elems: []attr.Value{}, + }, + }, + }, + }, + }, + EnterpriseSearch: types.List{ + ElemType: types.ObjectType{AttrTypes: enterpriseSearchResourceInfoAttrTypes()}, + Elems: []attr.Value{ + types.Object{ + AttrTypes: enterpriseSearchResourceInfoAttrTypes(), + Attrs: map[string]attr.Value{ + "elasticsearch_cluster_ref_id": types.String{Value: ""}, + "healthy": types.Bool{Value: true}, + "http_endpoint": types.String{Value: ""}, + "https_endpoint": types.String{Value: ""}, + "ref_id": types.String{Value: ""}, + "resource_id": types.String{Value: ""}, + "status": types.String{Value: ""}, + "version": types.String{Value: ""}, + "topology": types.List{ + ElemType: types.ObjectType{AttrTypes: enterpriseSearchTopologyAttrTypes()}, + Elems: []attr.Value{}, + }, + }, + }, + }, + }, + Tags: util.StringMapAsType(map[string]string{"foo": "bar"}), } } diff --git 
a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go index 1369bd4a9..6c6a6a70d 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm.go @@ -18,84 +18,110 @@ package deploymentdatasource import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenApmResources takes in Apm resource models and returns its // flattened form. -func flattenApmResources(in []*models.ApmResourceInfo) []interface{} { - var result = make([]interface{}, 0, len(in)) +func flattenApmResources(ctx context.Context, in []*models.ApmResourceInfo) (types.List, diag.Diagnostics) { + var diagnostics diag.Diagnostics + var result = make([]apmResourceInfoModelV0, 0, len(in)) + for _, res := range in { - var m = make(map[string]interface{}) + model := apmResourceInfoModelV0{ + Topology: types.List{ElemType: types.ObjectType{AttrTypes: apmTopologyAttrTypes()}}, + } if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID + model.ElasticsearchClusterRefID = types.String{Value: *res.ElasticsearchClusterRefID} } if res.RefID != nil { - m["ref_id"] = *res.RefID + model.RefID = types.String{Value: *res.RefID} } if res.Info != nil { if res.Info.Healthy != nil { - m["healthy"] = *res.Info.Healthy + model.Healthy = types.Bool{Value: *res.Info.Healthy} } if res.Info.ID != nil { - m["resource_id"] = *res.Info.ID + model.ResourceID = types.String{Value: *res.Info.ID} } if res.Info.Status != nil { - m["status"] = *res.Info.Status + model.Status = types.String{Value: *res.Info.Status} } if 
!util.IsCurrentApmPlanEmpty(res) { var plan = res.Info.PlanInfo.Current.Plan if plan.Apm != nil { - m["version"] = plan.Apm.Version + model.Version = types.String{Value: plan.Apm.Version} } - m["topology"] = flattenApmTopology(plan) + var diags diag.Diagnostics + model.Topology, diags = flattenApmTopology(ctx, plan) + diagnostics.Append(diags...) } if res.Info.Metadata != nil { - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } + model.HttpEndpoint, model.HttpsEndpoint = converters.ExtractEndpointsToTypes(res.Info.Metadata) } } - result = append(result, m) + result = append(result, model) } - return result + var target types.List + diagnostics.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: apmResourceInfoAttrTypes(), + }, + }, &target)...) + + return target, diagnostics } -func flattenApmTopology(plan *models.ApmPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) +func flattenApmTopology(ctx context.Context, plan *models.ApmPlan) (types.List, diag.Diagnostics) { + var result = make([]apmTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) + var model apmTopologyModelV0 if isApmSizePopulated(topology) && *topology.Size.Value == 0 { continue } - m["instance_configuration_id"] = topology.InstanceConfigurationID + model.InstanceConfigurationID = types.String{Value: topology.InstanceConfigurationID} if isApmSizePopulated(topology) { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource + model.Size = types.String{Value: util.MemoryToState(*topology.Size.Value)} + model.SizeResource = types.String{Value: *topology.Size.Resource} } - m["zone_count"] = topology.ZoneCount + model.ZoneCount = types.Int64{Value: int64(topology.ZoneCount)} - result = append(result, m) + result = append(result, model) } - return result + var target 
types.List + + diags := tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: apmTopologyAttrTypes(), + }, + }, &target) + + return target, diags } func isApmSizePopulated(topology *models.ApmTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go index cbcdab473..2c9f43725 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_apm_test.go @@ -18,12 +18,17 @@ package deploymentdatasource import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenApmResource(t *testing.T) { @@ -33,12 +38,12 @@ func Test_flattenApmResource(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []apmResourceInfoModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.ApmResourceInfo{}}, - want: []interface{}{}, + want: []apmResourceInfoModelV0{}, }, { name: "parses the apm resource", @@ -85,32 +90,37 @@ func Test_flattenApmResource(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "http_endpoint": "http://apmresource.cloud.elastic.co:9200", - "https_endpoint": "https://apmresource.cloud.elastic.co:9243", - "healthy": true, - "status": "started", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.apm.r4", - "size": "1g", - "size_resource": 
"memory", - "zone_count": int32(1), + want: []apmResourceInfoModelV0{{ + ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, + RefID: types.String{Value: "main-apm"}, + ResourceID: types.String{Value: mock.ValidClusterID}, + Version: types.String{Value: "7.7.0"}, + HttpEndpoint: types.String{Value: "http://apmresource.cloud.elastic.co:9200"}, + HttpsEndpoint: types.String{Value: "https://apmresource.cloud.elastic.co:9243"}, + Healthy: types.Bool{Value: true}, + Status: types.String{Value: "started"}, + Topology: types.List{ElemType: types.ObjectType{AttrTypes: apmTopologyAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: apmTopologyAttrTypes(), + Attrs: map[string]attr.Value{ + "instance_configuration_id": types.String{Value: "aws.apm.r4"}, + "size": types.String{Value: "1g"}, + "size_resource": types.String{Value: "memory"}, + "zone_count": types.Int64{Value: 1}, }, - }, + }}, }, - }, + }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenApmResources(tt.args.in) + apm, diags := flattenApmResources(context.Background(), tt.args.in) + assert.Empty(t, diags) + var got []apmResourceInfoModelV0 + apm.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + util.CheckConverionToAttrValue(t, &DataSource{}, "apm", apm) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go index b97e59ed4..0e7d1be73 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch.go @@ -18,147 +18,174 @@ package deploymentdatasource import ( + "context" "encoding/json" "fmt" "strconv" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenElasticsearchResources takes in Elasticsearch resource models and returns its // flattened form. -func flattenElasticsearchResources(in []*models.ElasticsearchResourceInfo) ([]interface{}, error) { - var result = make([]interface{}, 0, len(in)) +func flattenElasticsearchResources(ctx context.Context, in []*models.ElasticsearchResourceInfo) (types.List, diag.Diagnostics) { + var diagnostics diag.Diagnostics + var result = make([]elasticsearchResourceInfoModelV0, 0, len(in)) + for _, res := range in { - var m = make(map[string]interface{}) + model := elasticsearchResourceInfoModelV0{ + Topology: types.List{ElemType: types.ObjectType{AttrTypes: elasticsearchTopologyAttrTypes()}}, + } if res.RefID != nil { - m["ref_id"] = *res.RefID + model.RefID = types.String{Value: *res.RefID} } if res.Info != nil { if res.Info.Healthy != nil { - m["healthy"] = *res.Info.Healthy + model.Healthy = types.Bool{Value: *res.Info.Healthy} } if res.Info.ClusterID != nil { - m["resource_id"] = *res.Info.ClusterID + model.ResourceID = types.String{Value: *res.Info.ClusterID} } if res.Info.Status != nil { - m["status"] = *res.Info.Status + model.Status = types.String{Value: *res.Info.Status} } if !util.IsCurrentEsPlanEmpty(res) { var plan = res.Info.PlanInfo.Current.Plan if plan.Elasticsearch != nil { - m["version"] = plan.Elasticsearch.Version + model.Version = types.String{Value: plan.Elasticsearch.Version} } if plan.AutoscalingEnabled != nil { - m["autoscale"] = strconv.FormatBool(*plan.AutoscalingEnabled) + model.Autoscale = types.String{Value: strconv.FormatBool(*plan.AutoscalingEnabled)} } - top, err := flattenElasticsearchTopology(plan) - if err != nil { - return nil, err - } - m["topology"] = top + var diags diag.Diagnostics + model.Topology, diags = flattenElasticsearchTopology(ctx, 
plan) + diagnostics.Append(diags...) } if res.Info.Metadata != nil { - m["cloud_id"] = res.Info.Metadata.CloudID - - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } + model.CloudID = types.String{Value: res.Info.Metadata.CloudID} + model.HttpEndpoint, model.HttpsEndpoint = converters.ExtractEndpointsToTypes(res.Info.Metadata) } } - result = append(result, m) + + result = append(result, model) } - return result, nil + var target types.List + + diagnostics.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: elasticsearchResourceInfoAttrTypes(), + }, + }, &target)...) + + return target, diagnostics } -func flattenElasticsearchTopology(plan *models.ElasticsearchClusterPlan) ([]interface{}, error) { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) +func flattenElasticsearchTopology(ctx context.Context, plan *models.ElasticsearchClusterPlan) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + var result = make([]elasticsearchTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) + model := elasticsearchTopologyModelV0{ + NodeRoles: types.Set{ElemType: types.StringType}, + } - if isSizePopulated(topology) && *topology.Size.Value == 0 { + if isElasticsearchSizePopulated(topology) && *topology.Size.Value == 0 { continue } - m["instance_configuration_id"] = topology.InstanceConfigurationID + model.InstanceConfigurationID = types.String{Value: topology.InstanceConfigurationID} - if isSizePopulated(topology) { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource + if isElasticsearchSizePopulated(topology) { + model.Size = types.String{Value: util.MemoryToState(*topology.Size.Value)} + model.SizeResource = types.String{Value: *topology.Size.Resource} } - m["zone_count"] = topology.ZoneCount + model.ZoneCount = types.Int64{Value: 
int64(topology.ZoneCount)} if topology.NodeType != nil { if topology.NodeType.Data != nil { - m["node_type_data"] = *topology.NodeType.Data + model.NodeTypeData = types.Bool{Value: *topology.NodeType.Data} } if topology.NodeType.Ingest != nil { - m["node_type_ingest"] = *topology.NodeType.Ingest + model.NodeTypeIngest = types.Bool{Value: *topology.NodeType.Ingest} } if topology.NodeType.Master != nil { - m["node_type_master"] = *topology.NodeType.Master + model.NodeTypeMaster = types.Bool{Value: *topology.NodeType.Master} } if topology.NodeType.Ml != nil { - m["node_type_ml"] = *topology.NodeType.Ml + model.NodeTypeMl = types.Bool{Value: *topology.NodeType.Ml} } } if len(topology.NodeRoles) > 0 { - m["node_roles"] = schema.NewSet(schema.HashString, util.StringToItems( - topology.NodeRoles..., - )) + diags.Append(tfsdk.ValueFrom(ctx, topology.NodeRoles, types.SetType{ElemType: types.StringType}, &model.NodeRoles)...) } - autoscaling := make(map[string]interface{}) - if ascale := topology.AutoscalingMax; ascale != nil { - autoscaling["max_size_resource"] = *ascale.Resource - autoscaling["max_size"] = util.MemoryToState(*ascale.Value) + var autoscaling elasticsearchAutoscalingModel + var hasAutoscalingModel = false + if limit := topology.AutoscalingMax; limit != nil { + autoscaling.MaxSizeResource = types.String{Value: *limit.Resource} + autoscaling.MaxSize = types.String{Value: util.MemoryToState(*limit.Value)} + hasAutoscalingModel = true } - if ascale := topology.AutoscalingMin; ascale != nil { - autoscaling["min_size_resource"] = *ascale.Resource - autoscaling["min_size"] = util.MemoryToState(*ascale.Value) + if limit := topology.AutoscalingMin; limit != nil { + autoscaling.MinSizeResource = types.String{Value: *limit.Resource} + autoscaling.MinSize = types.String{Value: util.MemoryToState(*limit.Value)} + hasAutoscalingModel = true } if topology.AutoscalingPolicyOverrideJSON != nil { b, err := json.Marshal(topology.AutoscalingPolicyOverrideJSON) if err != nil { - 
return nil, fmt.Errorf( - "elasticsearch topology %s: unable to persist policy_override_json: %w", - topology.ID, err, + diags.AddError( + "Invalid elasticsearch topology policy_override_json", + fmt.Sprintf("elasticsearch topology %s: unable to persist policy_override_json: %v", topology.ID, err), ) + } else { + autoscaling.PolicyOverrideJson = types.String{Value: string(b)} + hasAutoscalingModel = true } - autoscaling["policy_override_json"] = string(b) } - if len(autoscaling) > 0 { - m["autoscaling"] = []interface{}{autoscaling} + if hasAutoscalingModel { + diags.Append(tfsdk.ValueFrom(ctx, []elasticsearchAutoscalingModel{autoscaling}, elasticsearchAutoscalingListType(), &model.Autoscaling)...) } - result = append(result, m) + result = append(result, model) } - return result, nil + var target types.List + + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: elasticsearchTopologyAttrTypes(), + }, + }, &target)...) + + return target, diags } -func isSizePopulated(topology *models.ElasticsearchClusterTopologyElement) bool { +func isElasticsearchSizePopulated(topology *models.ElasticsearchClusterTopologyElement) bool { if topology.Size != nil && topology.Size.Value != nil { return true } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go index a251cf143..3e623b99f 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_elasticsearch_test.go @@ -18,12 +18,17 @@ package deploymentdatasource import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - 
"github.com/stretchr/testify/assert" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenElasticsearchResources(t *testing.T) { @@ -33,13 +38,13 @@ func Test_flattenElasticsearchResources(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []elasticsearchResourceInfoModelV0 err string }{ { name: "empty resource list returns empty list", args: args{in: []*models.ElasticsearchResourceInfo{}}, - want: []interface{}{}, + want: []elasticsearchResourceInfoModelV0{}, }, { name: "parses elasticsearch resource", @@ -81,6 +86,9 @@ func Test_flattenElasticsearchResources(t *testing.T) { Master: ec.Bool(true), Ml: ec.Bool(false), }, + // NodeRoles cannot be used simultaneously with NodeType + // but let's have it here for testing purposes + NodeRoles: []string{"data_content", "data_hot"}, AutoscalingMax: &models.TopologySize{ Resource: ec.String("memory"), Value: ec.Int32(15360), @@ -89,6 +97,11 @@ func Test_flattenElasticsearchResources(t *testing.T) { Resource: ec.String("memory"), Value: ec.Int32(1024), }, + AutoscalingPolicyOverrideJSON: map[string]interface{}{ + "proactive_storage": map[string]interface{}{ + "forecast_window": "3 h", + }, + }, }, { NodeCountPerZone: 1, @@ -112,42 +125,61 @@ func Test_flattenElasticsearchResources(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "autoscale": "true", - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "cloud_id": "some CLOUD ID", - "http_endpoint": "http://somecluster.cloud.elastic.co:9200", - "https_endpoint": "https://somecluster.cloud.elastic.co:9243", - "healthy": true, - "status": "started", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.data.highio.i3", - "size": "2g", - "size_resource": "memory", - "node_type_data": true, - "node_type_ingest": true, - "node_type_master": true, - "node_type_ml": false, - "zone_count": int32(1), - "autoscaling": 
[]interface{}{map[string]interface{}{ - "max_size": "15g", - "max_size_resource": "memory", - "min_size": "1g", - "min_size_resource": "memory", - }}, - }}, + want: []elasticsearchResourceInfoModelV0{{ + Autoscale: types.String{Value: "true"}, + RefID: types.String{Value: "main-elasticsearch"}, + ResourceID: types.String{Value: mock.ValidClusterID}, + Version: types.String{Value: "7.7.0"}, + CloudID: types.String{Value: "some CLOUD ID"}, + HttpEndpoint: types.String{Value: "http://somecluster.cloud.elastic.co:9200"}, + HttpsEndpoint: types.String{Value: "https://somecluster.cloud.elastic.co:9243"}, + Healthy: types.Bool{Value: true}, + Status: types.String{Value: "started"}, + Topology: types.List{ElemType: types.ObjectType{AttrTypes: elasticsearchTopologyAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: elasticsearchTopologyAttrTypes(), + Attrs: map[string]attr.Value{ + "instance_configuration_id": types.String{Value: "aws.data.highio.i3"}, + "size": types.String{Value: "2g"}, + "size_resource": types.String{Value: "memory"}, + "node_type_data": types.Bool{Value: true}, + "node_type_ingest": types.Bool{Value: true}, + "node_type_master": types.Bool{Value: true}, + "node_type_ml": types.Bool{Value: false}, + "node_roles": types.Set{ElemType: types.StringType, Elems: func() []attr.Value { + result := make([]attr.Value, 0, 2) + for _, role := range []string{"data_content", "data_hot"} { + result = append(result, types.String{Value: role}) + } + return result + }()}, + "zone_count": types.Int64{Value: 1}, + "autoscaling": types.List{ElemType: types.ObjectType{AttrTypes: elasticsearchAutoscalingAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: elasticsearchAutoscalingAttrTypes(), + Attrs: map[string]attr.Value{ + "max_size": types.String{Value: "15g"}, + "max_size_resource": types.String{Value: "memory"}, + "min_size": types.String{Value: "1g"}, + "min_size_resource": types.String{Value: "memory"}, + "policy_override_json": types.String{Value: 
"{\"proactive_storage\":{\"forecast_window\":\"3 h\"}}"}, + }}, + }, + }, + }}, + }, + }, }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := flattenElasticsearchResources(tt.args.in) - if err != nil && assert.EqualError(t, err, tt.err) { - t.Error(err) - } + elasticsearch, diags := flattenElasticsearchResources(context.Background(), tt.args.in) + assert.Empty(t, diags) + var got []elasticsearchResourceInfoModelV0 + elasticsearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + util.CheckConverionToAttrValue(t, &DataSource{}, "elasticsearch", elasticsearch) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go index 1e0efb9c1..bd306fd43 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search.go @@ -18,97 +18,123 @@ package deploymentdatasource import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenEnterpriseSearchResources takes in EnterpriseSearch resource models and returns its // flattened form. 
-func flattenEnterpriseSearchResources(in []*models.EnterpriseSearchResourceInfo) []interface{} { - var result = make([]interface{}, 0, len(in)) - for _, res := range in { - var m = make(map[string]interface{}) +func flattenEnterpriseSearchResources(ctx context.Context, in []*models.EnterpriseSearchResourceInfo) (types.List, diag.Diagnostics) { + var diagnostics diag.Diagnostics + var result = make([]enterpriseSearchResourceInfoModelV0, 0, len(in)) - if res.RefID != nil { - m["ref_id"] = *res.RefID + for _, res := range in { + model := enterpriseSearchResourceInfoModelV0{ + Topology: types.List{ElemType: types.ObjectType{AttrTypes: enterpriseSearchTopologyAttrTypes()}}, } if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID + model.ElasticsearchClusterRefID = types.String{Value: *res.ElasticsearchClusterRefID} + } + + if res.RefID != nil { + model.RefID = types.String{Value: *res.RefID} } if res.Info != nil { if res.Info.Healthy != nil { - m["healthy"] = *res.Info.Healthy + model.Healthy = types.Bool{Value: *res.Info.Healthy} } if res.Info.ID != nil { - m["resource_id"] = *res.Info.ID + model.ResourceID = types.String{Value: *res.Info.ID} } if res.Info.Status != nil { - m["status"] = *res.Info.Status + model.Status = types.String{Value: *res.Info.Status} } if !util.IsCurrentEssPlanEmpty(res) { var plan = res.Info.PlanInfo.Current.Plan if plan.EnterpriseSearch != nil { - m["version"] = plan.EnterpriseSearch.Version + model.Version = types.String{Value: plan.EnterpriseSearch.Version} } - m["topology"] = flattenEnterpriseSearchTopology(plan) + var diags diag.Diagnostics + model.Topology, diags = flattenEnterpriseSearchTopology(ctx, plan) + diagnostics.Append(diags...) 
} if res.Info.Metadata != nil { - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } + model.HttpEndpoint, model.HttpsEndpoint = converters.ExtractEndpointsToTypes(res.Info.Metadata) } } - result = append(result, m) + + result = append(result, model) } - return result + var target types.List + diagnostics.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: enterpriseSearchResourceInfoAttrTypes(), + }, + }, &target)...) + + return target, diagnostics } -func flattenEnterpriseSearchTopology(plan *models.EnterpriseSearchPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) +func flattenEnterpriseSearchTopology(ctx context.Context, plan *models.EnterpriseSearchPlan) (types.List, diag.Diagnostics) { + var result = make([]enterpriseSearchTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) + var model enterpriseSearchTopologyModelV0 if isEsSizePopulated(topology) && *topology.Size.Value == 0 { continue } - m["instance_configuration_id"] = topology.InstanceConfigurationID - - m["zone_count"] = topology.ZoneCount + model.InstanceConfigurationID = types.String{Value: topology.InstanceConfigurationID} if isEsSizePopulated(topology) { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource + model.Size = types.String{Value: util.MemoryToState(*topology.Size.Value)} + model.SizeResource = types.String{Value: *topology.Size.Resource} } + model.ZoneCount = types.Int64{Value: int64(topology.ZoneCount)} + if topology.NodeType != nil { if topology.NodeType.Appserver != nil { - m["node_type_appserver"] = *topology.NodeType.Appserver + model.NodeTypeAppserver = types.Bool{Value: *topology.NodeType.Appserver} } if topology.NodeType.Connector != nil { - m["node_type_connector"] = *topology.NodeType.Connector + model.NodeTypeConnector = types.Bool{Value: 
*topology.NodeType.Connector} } if topology.NodeType.Worker != nil { - m["node_type_worker"] = *topology.NodeType.Worker + model.NodeTypeWorker = types.Bool{Value: *topology.NodeType.Worker} } } - result = append(result, m) + result = append(result, model) } - return result + var target types.List + diags := tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: enterpriseSearchTopologyAttrTypes(), + }, + }, &target) + + return target, diags } func isEsSizePopulated(topology *models.EnterpriseSearchTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go index 53dec0777..528b310b6 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_enterprise_search_test.go @@ -18,12 +18,17 @@ package deploymentdatasource import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenEnterpriseSearchResource(t *testing.T) { @@ -33,12 +38,12 @@ func Test_flattenEnterpriseSearchResource(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []enterpriseSearchResourceInfoModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.EnterpriseSearchResourceInfo{}}, - want: []interface{}{}, + want: []enterpriseSearchResourceInfoModelV0{}, }, { name: "parses the enterprisesearch resource", @@ -96,32 +101,41 @@ func Test_flattenEnterpriseSearchResource(t *testing.T) { }, }, }}, - want: []interface{}{ - 
map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "http_endpoint": "http://enterprisesearchresource.cloud.elastic.co:9200", - "https_endpoint": "https://enterprisesearchresource.cloud.elastic.co:9243", - "healthy": true, - "status": "started", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - "node_type_appserver": true, - "node_type_worker": false, - }}, - }, + want: []enterpriseSearchResourceInfoModelV0{{ + ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, + RefID: types.String{Value: "main-enterprise_search"}, + ResourceID: types.String{Value: mock.ValidClusterID}, + Version: types.String{Value: "7.7.0"}, + HttpEndpoint: types.String{Value: "http://enterprisesearchresource.cloud.elastic.co:9200"}, + HttpsEndpoint: types.String{Value: "https://enterprisesearchresource.cloud.elastic.co:9243"}, + Healthy: types.Bool{Value: true}, + Status: types.String{Value: "started"}, + Topology: types.List{ElemType: types.ObjectType{AttrTypes: enterpriseSearchTopologyAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: enterpriseSearchTopologyAttrTypes(), + Attrs: map[string]attr.Value{ + "instance_configuration_id": types.String{Value: "aws.enterprisesearch.r4"}, + "size": types.String{Value: "1g"}, + "size_resource": types.String{Value: "memory"}, + "zone_count": types.Int64{Value: 1}, + "node_type_appserver": types.Bool{Value: true}, + "node_type_connector": types.Bool{Value: false}, + "node_type_worker": types.Bool{Value: false}, + }, + }, + }, + }}, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenEnterpriseSearchResources(tt.args.in) + enterpriseSearch, diags := flattenEnterpriseSearchResources(context.Background(), tt.args.in) + assert.Empty(t, 
diags) + var got []enterpriseSearchResourceInfoModelV0 + enterpriseSearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + util.CheckConverionToAttrValue(t, &DataSource{}, "enterprise_search", enterpriseSearch) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go index 70a3f6314..08000c06b 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server.go @@ -18,84 +18,109 @@ package deploymentdatasource import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenIntegrationsServerResources takes in IntegrationsServer resource models and returns its // flattened form. 
-func flattenIntegrationsServerResources(in []*models.IntegrationsServerResourceInfo) []interface{} { - var result = make([]interface{}, 0, len(in)) +func flattenIntegrationsServerResources(ctx context.Context, in []*models.IntegrationsServerResourceInfo) (types.List, diag.Diagnostics) { + var diagnostics diag.Diagnostics + var result = make([]integrationsServerResourceInfoModelV0, 0, len(in)) + for _, res := range in { - var m = make(map[string]interface{}) + model := integrationsServerResourceInfoModelV0{ + Topology: types.List{ElemType: types.ObjectType{AttrTypes: integrationsServerTopologyAttrTypes()}}, + } if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID + model.ElasticsearchClusterRefID = types.String{Value: *res.ElasticsearchClusterRefID} } if res.RefID != nil { - m["ref_id"] = *res.RefID + model.RefID = types.String{Value: *res.RefID} } if res.Info != nil { if res.Info.Healthy != nil { - m["healthy"] = *res.Info.Healthy + model.Healthy = types.Bool{Value: *res.Info.Healthy} } if res.Info.ID != nil { - m["resource_id"] = *res.Info.ID + model.ResourceID = types.String{Value: *res.Info.ID} } if res.Info.Status != nil { - m["status"] = *res.Info.Status + model.Status = types.String{Value: *res.Info.Status} } if !util.IsCurrentIntegrationsServerPlanEmpty(res) { var plan = res.Info.PlanInfo.Current.Plan if plan.IntegrationsServer != nil { - m["version"] = plan.IntegrationsServer.Version + model.Version = types.String{Value: plan.IntegrationsServer.Version} } - m["topology"] = flattenIntegrationsServerTopology(plan) + var diags diag.Diagnostics + model.Topology, diags = flattenIntegrationsServerTopology(ctx, plan) + diagnostics.Append(diags...) 
} if res.Info.Metadata != nil { - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } + model.HttpEndpoint, model.HttpsEndpoint = converters.ExtractEndpointsToTypes(res.Info.Metadata) } } - result = append(result, m) + result = append(result, model) } - return result + var target types.List + diagnostics.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: integrationsServerResourceInfoAttrTypes(), + }, + }, &target)...) + + return target, diagnostics } -func flattenIntegrationsServerTopology(plan *models.IntegrationsServerPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) +func flattenIntegrationsServerTopology(ctx context.Context, plan *models.IntegrationsServerPlan) (types.List, diag.Diagnostics) { + var result = make([]integrationsServerTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) + var model integrationsServerTopologyModelV0 if isIntegrationsServerSizePopulated(topology) && *topology.Size.Value == 0 { continue } - m["instance_configuration_id"] = topology.InstanceConfigurationID + model.InstanceConfigurationID = types.String{Value: topology.InstanceConfigurationID} if isIntegrationsServerSizePopulated(topology) { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource + model.Size = types.String{Value: util.MemoryToState(*topology.Size.Value)} + model.SizeResource = types.String{Value: *topology.Size.Resource} } - m["zone_count"] = topology.ZoneCount + model.ZoneCount = types.Int64{Value: int64(topology.ZoneCount)} - result = append(result, m) + result = append(result, model) } - return result + var target types.List + diags := tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: integrationsServerTopologyAttrTypes(), + }, + }, &target) + + return target, diags } func 
isIntegrationsServerSizePopulated(topology *models.IntegrationsServerTopologyElement) bool { diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go index dd8576516..8eb6d73df 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_integrations_server_test.go @@ -18,12 +18,17 @@ package deploymentdatasource import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenIntegrationsServerResource(t *testing.T) { @@ -33,12 +38,12 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []integrationsServerResourceInfoModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.IntegrationsServerResourceInfo{}}, - want: []interface{}{}, + want: []integrationsServerResourceInfoModelV0{}, }, { name: "parses the integrations_server resource", @@ -57,60 +62,67 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { HTTPS: ec.Int32(9243), }, }, - PlanInfo: &models.IntegrationsServerPlansInfo{Current: &models.IntegrationsServerPlanInfo{ - Plan: &models.IntegrationsServerPlan{ - IntegrationsServer: &models.IntegrationsServerConfiguration{ - Version: "8.0.0", - }, - ClusterTopology: []*models.IntegrationsServerTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.integrations_server.r4", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, + 
PlanInfo: &models.IntegrationsServerPlansInfo{ + Current: &models.IntegrationsServerPlanInfo{ + Plan: &models.IntegrationsServerPlan{ + IntegrationsServer: &models.IntegrationsServerConfiguration{ + Version: "8.0.0", }, - { - ZoneCount: 1, - InstanceConfigurationID: "aws.integrations_server.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), + ClusterTopology: []*models.IntegrationsServerTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.integrations_server.r4", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + { + ZoneCount: 1, + InstanceConfigurationID: "aws.integrations_server.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, }, }, }, }, - }}, + }, }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "version": "8.0.0", - "http_endpoint": "http://integrations_serverresource.cloud.elastic.co:9200", - "https_endpoint": "https://integrations_serverresource.cloud.elastic.co:9243", - "healthy": true, - "status": "started", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.integrations_server.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), + want: []integrationsServerResourceInfoModelV0{{ + ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, + RefID: types.String{Value: "main-integrations_server"}, + ResourceID: types.String{Value: mock.ValidClusterID}, + Version: types.String{Value: "8.0.0"}, + HttpEndpoint: types.String{Value: "http://integrations_serverresource.cloud.elastic.co:9200"}, + HttpsEndpoint: types.String{Value: "https://integrations_serverresource.cloud.elastic.co:9243"}, + Healthy: types.Bool{Value: true}, + Status: types.String{Value: "started"}, + Topology: 
types.List{ElemType: types.ObjectType{AttrTypes: integrationsServerTopologyAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: integrationsServerTopologyAttrTypes(), + Attrs: map[string]attr.Value{ + "instance_configuration_id": types.String{Value: "aws.integrations_server.r4"}, + "size": types.String{Value: "1g"}, + "size_resource": types.String{Value: "memory"}, + "zone_count": types.Int64{Value: 1}, }, - }, + }}, }, - }, + }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenIntegrationsServerResources(tt.args.in) + integrationsServer, diags := flattenIntegrationsServerResources(context.Background(), tt.args.in) + assert.Empty(t, diags) + var got []integrationsServerResourceInfoModelV0 + integrationsServer.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + util.CheckConverionToAttrValue(t, &DataSource{}, "integrations_server", integrationsServer) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go index 50d1f800d..f863d0c28 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana.go @@ -18,83 +18,111 @@ package deploymentdatasource import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) // flattenKibanaResources takes in Kibana resource models and returns its // flattened form. 
-func flattenKibanaResources(in []*models.KibanaResourceInfo) []interface{} { - var result = make([]interface{}, 0, len(in)) - for _, res := range in { - var m = make(map[string]interface{}) +func flattenKibanaResources(ctx context.Context, in []*models.KibanaResourceInfo) (types.List, diag.Diagnostics) { + var diagnostics diag.Diagnostics + var result = make([]kibanaResourceInfoModelV0, 0, len(in)) - if res.RefID != nil { - m["ref_id"] = *res.RefID + for _, res := range in { + model := kibanaResourceInfoModelV0{ + Topology: types.List{ElemType: types.ObjectType{AttrTypes: kibanaTopologyAttrTypes()}}, } if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID + model.ElasticsearchClusterRefID = types.String{Value: *res.ElasticsearchClusterRefID} + } + + if res.RefID != nil { + model.RefID = types.String{Value: *res.RefID} } if res.Info != nil { if res.Info.Healthy != nil { - m["healthy"] = *res.Info.Healthy + model.Healthy = types.Bool{Value: *res.Info.Healthy} } if res.Info.ClusterID != nil { - m["resource_id"] = *res.Info.ClusterID + model.ResourceID = types.String{Value: *res.Info.ClusterID} } if res.Info.Status != nil { - m["status"] = *res.Info.Status + model.Status = types.String{Value: *res.Info.Status} } if !util.IsCurrentKibanaPlanEmpty(res) { var plan = res.Info.PlanInfo.Current.Plan if plan.Kibana != nil { - m["version"] = plan.Kibana.Version + model.Version = types.String{Value: plan.Kibana.Version} } - m["topology"] = flattenKibanaTopology(plan) + var diags diag.Diagnostics + model.Topology, diags = flattenKibanaTopology(ctx, plan) + diagnostics.Append(diags...) 
} if res.Info.Metadata != nil { - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } + model.HttpEndpoint, model.HttpsEndpoint = converters.ExtractEndpointsToTypes(res.Info.Metadata) } } - result = append(result, m) + + result = append(result, model) } - return result + var target types.List + + diagnostics.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: kibanaResourceInfoAttrTypes(), + }, + }, &target)...) + + return target, diagnostics } -func flattenKibanaTopology(plan *models.KibanaClusterPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) +func flattenKibanaTopology(ctx context.Context, plan *models.KibanaClusterPlan) (types.List, diag.Diagnostics) { + var result = make([]kibanaTopologyModelV0, 0, len(plan.ClusterTopology)) for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) + var model kibanaTopologyModelV0 if isKibanaSizePopulated(topology) && *topology.Size.Value == 0 { continue } - m["instance_configuration_id"] = topology.InstanceConfigurationID + model.InstanceConfigurationID = types.String{Value: topology.InstanceConfigurationID} if isKibanaSizePopulated(topology) { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource + model.Size = types.String{Value: util.MemoryToState(*topology.Size.Value)} + model.SizeResource = types.String{Value: *topology.Size.Resource} } - m["zone_count"] = topology.ZoneCount + model.ZoneCount = types.Int64{Value: int64(topology.ZoneCount)} - result = append(result, m) + result = append(result, model) } - return result + var target types.List + + diags := tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: kibanaTopologyAttrTypes(), + }, + }, &target) + + return target, diags } func isKibanaSizePopulated(topology *models.KibanaClusterTopologyElement) bool { diff --git 
a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go index f3d3c41f3..8c023e164 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_kibana_test.go @@ -18,12 +18,17 @@ package deploymentdatasource import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenKibanaResources(t *testing.T) { @@ -33,12 +38,12 @@ func Test_flattenKibanaResources(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []kibanaResourceInfoModelV0 }{ { name: "empty resource list returns empty list", args: args{in: []*models.KibanaResourceInfo{}}, - want: []interface{}{}, + want: []kibanaResourceInfoModelV0{}, }, { name: "parses the kibana resource", @@ -87,32 +92,37 @@ func Test_flattenKibanaResources(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "http_endpoint": "http://kibanaresource.cloud.elastic.co:9200", - "https_endpoint": "https://kibanaresource.cloud.elastic.co:9243", - "healthy": true, - "status": "started", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), + want: []kibanaResourceInfoModelV0{{ + ElasticsearchClusterRefID: types.String{Value: "main-elasticsearch"}, + RefID: types.String{Value: "main-kibana"}, + ResourceID: 
types.String{Value: mock.ValidClusterID}, + Version: types.String{Value: "7.7.0"}, + HttpEndpoint: types.String{Value: "http://kibanaresource.cloud.elastic.co:9200"}, + HttpsEndpoint: types.String{Value: "https://kibanaresource.cloud.elastic.co:9243"}, + Healthy: types.Bool{Value: true}, + Status: types.String{Value: "started"}, + Topology: types.List{ElemType: types.ObjectType{AttrTypes: kibanaTopologyAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: kibanaTopologyAttrTypes(), + Attrs: map[string]attr.Value{ + "instance_configuration_id": types.String{Value: "aws.kibana.r4"}, + "size": types.String{Value: "1g"}, + "size_resource": types.String{Value: "memory"}, + "zone_count": types.Int64{Value: 1}, }, - }, - }, + }}}, + }, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenKibanaResources(tt.args.in) + kibana, diags := flattenKibanaResources(context.Background(), tt.args.in) + assert.Empty(t, diags) + var got []kibanaResourceInfoModelV0 + kibana.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + util.CheckConverionToAttrValue(t, &DataSource{}, "kibana", kibana) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_observability.go b/ec/ecdatasource/deploymentdatasource/flatteners_observability.go index 53da35e1a..9bf527c5b 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_observability.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_observability.go @@ -17,33 +17,57 @@ package deploymentdatasource -import "github.com/elastic/cloud-sdk-go/pkg/models" +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/models" +) // flattenObservability parses a deployment's observability settings. 
-func flattenObservability(settings *models.DeploymentSettings) []interface{} { - if settings == nil || settings.Observability == nil { - return nil +func flattenObservability(ctx context.Context, settings *models.DeploymentSettings) (types.List, diag.Diagnostics) { + model := observabilitySettingsModel{ + Metrics: types.Bool{Value: false}, + Logs: types.Bool{Value: false}, + } + empty := true + + target := types.List{ + ElemType: types.ObjectType{ + AttrTypes: observabilitySettingsAttrTypes(), + }, } - var m = make(map[string]interface{}) + if settings == nil || settings.Observability == nil { + target.Null = true + return target, nil + } // We are only accepting a single deployment ID and refID for both logs and metrics. // If either of them is not nil the deployment ID and refID will be filled. if settings.Observability.Metrics != nil { - m["deployment_id"] = settings.Observability.Metrics.Destination.DeploymentID - m["ref_id"] = settings.Observability.Metrics.Destination.RefID - m["metrics"] = true + model.DeploymentID = types.String{Value: *settings.Observability.Metrics.Destination.DeploymentID} + model.RefID = types.String{Value: settings.Observability.Metrics.Destination.RefID} + model.Metrics = types.Bool{Value: true} + empty = false } if settings.Observability.Logging != nil { - m["deployment_id"] = settings.Observability.Logging.Destination.DeploymentID - m["ref_id"] = settings.Observability.Logging.Destination.RefID - m["logs"] = true + model.DeploymentID = types.String{Value: *settings.Observability.Logging.Destination.DeploymentID} + model.RefID = types.String{Value: settings.Observability.Logging.Destination.RefID} + model.Logs = types.Bool{Value: true} + empty = false } - if len(m) == 0 { - return nil + if empty { + target.Null = true + return target, nil } - return []interface{}{m} + diags := tfsdk.ValueFrom(ctx, []observabilitySettingsModel{model}, target.Type(ctx), &target) + + return target, diags } diff --git 
a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go index eb2e8d52d..3396f898f 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_observability_test.go @@ -18,11 +18,15 @@ package deploymentdatasource import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/stretchr/testify/assert" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func TestFlattenObservability(t *testing.T) { @@ -32,18 +36,18 @@ func TestFlattenObservability(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []observabilitySettingsModel }{ { - name: "flattens no observability settings when empty", + name: "flattens no observability settings when empty #1", args: args{}, }, { - name: "flattens no observability settings when empty", + name: "flattens no observability settings when empty #2", args: args{settings: &models.DeploymentSettings{}}, }, { - name: "flattens no observability settings when empty", + name: "flattens no observability settings when empty #3", args: args{settings: &models.DeploymentSettings{Observability: &models.DeploymentObservabilitySettings{}}}, }, { @@ -58,10 +62,11 @@ func TestFlattenObservability(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, + want: []observabilitySettingsModel{{ + DeploymentID: types.String{Value: mock.ValidClusterID}, + RefID: types.String{Value: "main-elasticsearch"}, + Logs: types.Bool{Value: true}, + Metrics: types.Bool{Value: false}, }}, }, { @@ -76,10 +81,11 @@ func TestFlattenObservability(t *testing.T) { }, }, }}, - want: 
[]interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "metrics": true, + want: []observabilitySettingsModel{{ + DeploymentID: types.String{Value: mock.ValidClusterID}, + RefID: types.String{Value: "main-elasticsearch"}, + Logs: types.Bool{Value: false}, + Metrics: types.Bool{Value: true}, }}, }, { @@ -100,18 +106,22 @@ func TestFlattenObservability(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, - "metrics": true, + want: []observabilitySettingsModel{{ + DeploymentID: types.String{Value: mock.ValidClusterID}, + RefID: types.String{Value: "main-elasticsearch"}, + Logs: types.Bool{Value: true}, + Metrics: types.Bool{Value: true}, }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenObservability(tt.args.settings) + observability, diags := flattenObservability(context.Background(), tt.args.settings) + assert.Empty(t, diags) + var got []observabilitySettingsModel + observability.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + util.CheckConverionToAttrValue(t, &DataSource{}, "observability", observability) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go index 18fda3a88..b2c5d4e81 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter.go @@ -18,19 +18,25 @@ package deploymentdatasource import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" ) // flattenTrafficFiltering parses a deployment's traffic filtering settings. 
-func flattenTrafficFiltering(settings *models.DeploymentSettings) []interface{} { +func flattenTrafficFiltering(ctx context.Context, settings *models.DeploymentSettings) (types.List, diag.Diagnostics) { + target := types.List{ElemType: types.StringType} + if settings == nil || settings.TrafficFilterSettings == nil { - return nil + target.Null = true + return target, nil } - var rules []interface{} - for _, rule := range settings.TrafficFilterSettings.Rulesets { - rules = append(rules, rule) - } + diags := tfsdk.ValueFrom(ctx, settings.TrafficFilterSettings.Rulesets, target.Type(ctx), &target) - return rules + return target, diags } diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go index 557e0639a..87c18a07e 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go +++ b/ec/ecdatasource/deploymentdatasource/flatteners_traffic_filter_test.go @@ -18,10 +18,13 @@ package deploymentdatasource import ( + "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenTrafficFiltering(t *testing.T) { @@ -31,32 +34,33 @@ func Test_flattenTrafficFiltering(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []string }{ { - name: "parses no rules when they're empty", + name: "parses no rules when they're empty #1", args: args{}, }, { - name: "parses no rules when they're empty", + name: "parses no rules when they're empty #2", args: args{settings: &models.DeploymentSettings{}}, }, { - name: "parses no rules when they're empty", + name: "parses no rules when they're empty #3", args: args{settings: &models.DeploymentSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{}, }}, }, { - name: "parses no rules when they're empty", + name: 
"parses no rules when they're empty #4", args: args{settings: &models.DeploymentSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{ Rulesets: []string{}, }, }}, + want: []string{}, }, { - name: "parses no rules when they're empty", + name: "parses rules", args: args{settings: &models.DeploymentSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{ Rulesets: []string{ @@ -65,7 +69,7 @@ func Test_flattenTrafficFiltering(t *testing.T) { }, }, }}, - want: []interface{}{ + want: []string{ "one-id-of-a-rule", "another-id-of-another-rule", }, @@ -73,8 +77,12 @@ func Test_flattenTrafficFiltering(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenTrafficFiltering(tt.args.settings) + trafficFilter, diags := flattenTrafficFiltering(context.Background(), tt.args.settings) + assert.Empty(t, diags) + var got []string + trafficFilter.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + util.CheckConverionToAttrValue(t, &DataSource{}, "traffic_filter", trafficFilter) }) } } diff --git a/ec/ecdatasource/deploymentdatasource/schema.go b/ec/ecdatasource/deploymentdatasource/schema.go index b8a42b87a..acbfe4f3e 100644 --- a/ec/ecdatasource/deploymentdatasource/schema.go +++ b/ec/ecdatasource/deploymentdatasource/schema.go @@ -18,103 +18,79 @@ package deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "alias": { - Type: schema.TypeString, - Computed: true, - }, - "healthy": { - Type: schema.TypeBool, - Computed: true, - }, - "id": { - Type: schema.TypeString, - Required: true, - }, - "name": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - 
Computed: true, - }, - "deployment_template_id": { - Type: schema.TypeString, - Computed: true, - }, - "traffic_filter": { - Type: schema.TypeList, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, +func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + Attributes: map[string]tfsdk.Attribute{ + "alias": { + Type: types.StringType, + Description: "Deployment alias.", + Computed: true, }, - }, - "observability": { - Type: schema.TypeList, - Computed: true, - Elem: newObservabilitySettings(), - }, - "tags": { - Type: schema.TypeMap, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, + "healthy": { + Type: types.BoolType, + Description: "Overall health status of the deployment.", + Computed: true, }, - }, - - // Deployment resources - "elasticsearch": { - Type: schema.TypeList, - Computed: true, - Elem: newElasticsearchResourceInfo(), - }, - "kibana": { - Type: schema.TypeList, - Computed: true, - Elem: newKibanaResourceInfo(), - }, - "apm": { - Type: schema.TypeList, - Computed: true, - Elem: newApmResourceInfo(), - }, - "integrations_server": { - Type: schema.TypeList, - Computed: true, - Elem: newIntegrationsServerResourceInfo(), - }, - "enterprise_search": { - Type: schema.TypeList, - Computed: true, - Elem: newEnterpriseSearchResourceInfo(), - }, - } -} - -func newObservabilitySettings() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployment_id": { - Type: schema.TypeString, - Computed: true, + "id": { + Type: types.StringType, + Description: "The unique ID of the deployment.", + Required: true, + }, + "name": { + Type: types.StringType, + Description: "The name of the deployment.", + Computed: true, + }, + "region": { + Type: types.StringType, + Description: "Region where the deployment is hosted.", + Computed: true, }, - "ref_id": { - Type: schema.TypeString, - Computed: true, + "deployment_template_id": { + Type: 
types.StringType, + Description: "ID of the deployment template this deployment is based off.", + Computed: true, }, - "logs": { - Type: schema.TypeBool, - Computed: true, + "traffic_filter": { + Type: types.ListType{ElemType: types.StringType}, + Description: "Traffic filter block, which contains a list of traffic filter rule identifiers.", + Computed: true, }, - "metrics": { - Type: schema.TypeBool, - Computed: true, + "tags": { + Type: types.MapType{ElemType: types.StringType}, + Description: "Key value map of arbitrary string tags.", + Computed: true, }, + "observability": observabilitySettingsSchema(), + "elasticsearch": elasticsearchResourceInfoSchema(), + "kibana": kibanaResourceInfoSchema(), + "apm": apmResourceInfoSchema(), + "integrations_server": integrationsServerResourceInfoSchema(), + "enterprise_search": enterpriseSearchResourceInfoSchema(), }, - } + }, nil +} + +type modelV0 struct { + Alias types.String `tfsdk:"alias"` + Healthy types.Bool `tfsdk:"healthy"` + ID types.String `tfsdk:"id"` + Name types.String `tfsdk:"name"` + Region types.String `tfsdk:"region"` + DeploymentTemplateID types.String `tfsdk:"deployment_template_id"` + TrafficFilter types.List `tfsdk:"traffic_filter"` //< string + Observability types.List `tfsdk:"observability"` //< observabilitySettingsModel + Tags types.Map `tfsdk:"tags"` //< string + Elasticsearch types.List `tfsdk:"elasticsearch"` //< elasticsearchResourceInfoModelV0 + Kibana types.List `tfsdk:"kibana"` //< kibanaResourceInfoModelV0 + Apm types.List `tfsdk:"apm"` //< apmResourceInfoModelV0 + IntegrationsServer types.List `tfsdk:"integrations_server"` //< integrationsServerResourceInfoModelV0 + EnterpriseSearch types.List `tfsdk:"enterprise_search"` //< enterpriseSearchResourceInfoModelV0 } diff --git a/ec/ecdatasource/deploymentdatasource/schema_apm.go b/ec/ecdatasource/deploymentdatasource/schema_apm.go index 3cda06270..33adf0e6f 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_apm.go +++ 
b/ec/ecdatasource/deploymentdatasource/schema_apm.go @@ -18,72 +18,116 @@ package deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newApmResourceInfo() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ +func apmResourceInfoSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Instance configuration of the APM type.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "A locally-unique friendly alias for an Elasticsearch resource in this deployment.", + Computed: true, }, "healthy": { - Type: schema.TypeBool, - Computed: true, + Type: types.BoolType, + Description: "APM resource health status.", + Computed: true, }, "http_endpoint": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "HTTP endpoint for the APM resource.", + Computed: true, }, "https_endpoint": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "HTTPS endpoint for the APM resource.", + Computed: true, }, "ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "A locally-unique friendly alias for this APM resource.", + Computed: true, }, "resource_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The resource unique identifier.", + Computed: true, }, "status": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "APM resource status (for example, \"started\", 
\"stopped\", etc).", + Computed: true, }, "version": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Elastic stack version.", + Computed: true, }, "topology": apmTopologySchema(), - }, + }), } } -func apmTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - }, - "size_resource": { - Type: schema.TypeString, - Computed: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - }, +func apmResourceInfoAttrTypes() map[string]attr.Type { + return apmResourceInfoSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +func apmTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Node topology element definition.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Description: "Controls the allocation of this topology element as well as allowed sizes and node_types. It needs to match the ID of an existing instance configuration.", + Computed: true, + }, + "size": { + Type: types.StringType, + Description: `Amount of "size_resource" in Gigabytes. 
For example "4g".`, + Computed: true, }, - }, + "size_resource": { + Type: types.StringType, + Description: "Type of resource (\"memory\" or \"storage\")", + Computed: true, + }, + "zone_count": { + Type: types.Int64Type, + Description: "Number of zones in which nodes will be placed.", + Computed: true, + }, + }), } } + +func apmTopologyAttrTypes() map[string]attr.Type { + return apmTopologySchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +type apmResourceInfoModelV0 struct { + ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` + Healthy types.Bool `tfsdk:"healthy"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + RefID types.String `tfsdk:"ref_id"` + ResourceID types.String `tfsdk:"resource_id"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` + Topology types.List `tfsdk:"topology"` //< apmTopologyModelV0 +} + +type apmTopologyModelV0 struct { + InstanceConfigurationID types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` +} diff --git a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go index d742c18ce..52bf4c9eb 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go +++ b/ec/ecdatasource/deploymentdatasource/schema_elasticsearch.go @@ -18,139 +18,206 @@ package deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newElasticsearchResourceInfo() *schema.Resource { - return &schema.Resource{ - Schema: 
map[string]*schema.Schema{ +func elasticsearchResourceInfoSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Instance configuration of the Elasticsearch resource.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "autoscale": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Whether or not Elasticsearch autoscaling is enabled.", + Computed: true, }, "healthy": { - Type: schema.TypeBool, - Computed: true, + Type: types.BoolType, + Description: "Elasticsearch resource health status.", + Computed: true, }, "cloud_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The cloud ID, an encoded string that provides other Elastic services with the necessary information to connect to this Elasticsearch and Kibana.", + MarkdownDescription: "The cloud ID, an encoded string that provides other Elastic services with the necessary information to connect to this Elasticsearch and Kibana. 
See [Configure Beats and Logstash with Cloud ID](https://www.elastic.co/guide/en/cloud/current/ec-cloud-id.html) for more information.", + Computed: true, }, "http_endpoint": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "HTTP endpoint for the Elasticsearch resource.", + Computed: true, }, "https_endpoint": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "HTTPS endpoint for the Elasticsearch resource.", + Computed: true, }, "ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "A locally-unique friendly alias for this Elasticsearch cluster.", + Computed: true, }, "resource_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The resource unique identifier.", + Computed: true, }, "status": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Elasticsearch resource status (for example, \"started\", \"stopped\", etc).", + Computed: true, }, "version": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Elastic stack version.", + Computed: true, }, "topology": elasticsearchTopologySchema(), - }, + }), } } -func elasticsearchTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - }, - "size_resource": { - Type: schema.TypeString, - Computed: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - }, - "node_type_data": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_master": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_ingest": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_ml": { - Type: schema.TypeBool, - Optional: true, 
- }, - "node_roles": { - Type: schema.TypeSet, - Set: schema.HashString, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, +func elasticsearchResourceInfoAttrTypes() map[string]attr.Type { + return elasticsearchResourceInfoSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +func elasticsearchTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Node topology element definition.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Description: "Controls the allocation of this topology element as well as allowed sizes and node_types. It needs to match the ID of an existing instance configuration.", + Computed: true, + }, + "size": { + Type: types.StringType, + Description: `Amount of "size_resource" per topology element in Gigabytes. For example "4g".`, + Computed: true, + }, + "size_resource": { + Type: types.StringType, + Description: "Type of resource (\"memory\" or \"storage\")", + Computed: true, + }, + "zone_count": { + Type: types.Int64Type, + Description: "Number of zones in which nodes will be placed.", + Computed: true, + }, + "node_type_data": { + Type: types.BoolType, + Description: "Defines whether this node can hold data (<8.0).", + Computed: true, + }, + "node_type_master": { + Type: types.BoolType, + Description: "Defines whether this node can be elected master (<8.0).", + Computed: true, + }, + "node_type_ingest": { + Type: types.BoolType, + Description: "Defines whether this node can run an ingest pipeline (<8.0).", + Computed: true, + }, + "node_type_ml": { + Type: types.BoolType, + Description: "Defines whether this node can run ML jobs (<8.0).", + Computed: true, + }, + "node_roles": { + Type: types.SetType{ElemType: types.StringType}, + Description: "Defines the list of Elasticsearch node roles 
assigned to the topology element. This is supported from v7.10, and required from v8.", + Computed: true, + }, + "autoscaling": elasticsearchAutoscalingSchema(), + }), + } +} + +func elasticsearchTopologyAttrTypes() map[string]attr.Type { + return elasticsearchTopologySchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +func elasticsearchAutoscalingSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "max_size_resource": { + Type: types.StringType, + Description: "Resource type used when specifying the maximum size the tier can scale up to.", + Computed: true, + }, + "max_size": { + Type: types.StringType, + Description: "Maximum size the tier can scale up to, e.g \"64g\".", + Computed: true, + }, + "min_size_resource": { + Type: types.StringType, + Description: "Resource type used when specifying the minimum size the tier can scale down to when bidirectional autoscaling is supported.", + Computed: true, + }, + "min_size": { + Type: types.StringType, + Description: "Minimum size the tier can scale down to when bidirectional autoscaling is supported.", + Computed: true, + }, + "policy_override_json": { + Type: types.StringType, + Description: "An arbitrary JSON object overriding the default autoscaling policy. 
Don't set unless you really know what you are doing.", + Computed: true, + }, + }), + } +} - "autoscaling": { - Type: schema.TypeList, - Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_size_resource": { - Description: "Maximum resource type for the maximum autoscaling setting.", - Type: schema.TypeString, - Computed: true, - }, +func elasticsearchAutoscalingListType() attr.Type { + return elasticsearchAutoscalingSchema().Attributes.Type() +} - "max_size": { - Description: "Maximum size value for the maximum autoscaling setting.", - Type: schema.TypeString, - Computed: true, - }, +func elasticsearchAutoscalingAttrTypes() map[string]attr.Type { + return elasticsearchAutoscalingListType().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} - "min_size_resource": { - Description: "Minimum resource type for the minimum autoscaling setting.", - Type: schema.TypeString, - Computed: true, - }, +type elasticsearchResourceInfoModelV0 struct { + Autoscale types.String `tfsdk:"autoscale"` + Healthy types.Bool `tfsdk:"healthy"` + CloudID types.String `tfsdk:"cloud_id"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + RefID types.String `tfsdk:"ref_id"` + ResourceID types.String `tfsdk:"resource_id"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` + Topology types.List `tfsdk:"topology"` //< elasticsearchTopologyModelV0 +} - "min_size": { - Description: "Minimum size value for the minimum autoscaling setting.", - Type: schema.TypeString, - Computed: true, - }, +type elasticsearchTopologyModelV0 struct { + InstanceConfigurationID types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + NodeTypeData types.Bool 
`tfsdk:"node_type_data"` + NodeTypeMaster types.Bool `tfsdk:"node_type_master"` + NodeTypeIngest types.Bool `tfsdk:"node_type_ingest"` + NodeTypeMl types.Bool `tfsdk:"node_type_ml"` + NodeRoles types.Set `tfsdk:"node_roles"` + Autoscaling types.List `tfsdk:"autoscaling"` //< elasticsearchAutoscalingModel +} - "policy_override_json": { - Type: schema.TypeString, - Description: "Computed policy overrides set directly via the API or other clients.", - Computed: true, - }, - }, - }, - }, - }, - }, - } +type elasticsearchAutoscalingModel struct { + MaxSizeResource types.String `tfsdk:"max_size_resource"` + MaxSize types.String `tfsdk:"max_size"` + MinSizeResource types.String `tfsdk:"min_size_resource"` + MinSize types.String `tfsdk:"min_size"` + PolicyOverrideJson types.String `tfsdk:"policy_override_json"` } diff --git a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go index ec7cfeb5d..f3adfc391 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go +++ b/ec/ecdatasource/deploymentdatasource/schema_enterprise_search.go @@ -18,86 +18,134 @@ package deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newEnterpriseSearchResourceInfo() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ +func enterpriseSearchResourceInfoSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Instance configuration of the Enterprise Search type.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "elasticsearch_cluster_ref_id": { - Type: 
schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "A locally-unique friendly alias for an Elasticsearch resource in this deployment.", + Computed: true, }, "healthy": { - Type: schema.TypeBool, - Computed: true, + Type: types.BoolType, + Description: "Enterprise Search resource health status.", + Computed: true, }, "http_endpoint": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "HTTP endpoint for the Enterprise Search resource.", + Computed: true, }, "https_endpoint": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "HTTPS endpoint for the Enterprise Search resource.", + Computed: true, }, "ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "A locally-unique friendly alias for this Enterprise Search resource.", + Computed: true, }, "resource_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The resource unique identifier.", + Computed: true, }, "status": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Enterprise Search resource status (for example, \"started\", \"stopped\", etc).", + Computed: true, }, "version": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Elastic stack version.", + Computed: true, }, "topology": enterpriseSearchTopologySchema(), - }, + }), } } -func enterpriseSearchTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - }, - "size_resource": { - Type: schema.TypeString, - Computed: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - }, - "node_type_appserver": { - Type: schema.TypeBool, - Computed: 
true, - }, - - "node_type_connector": { - Type: schema.TypeBool, - Computed: true, - }, +func enterpriseSearchResourceInfoAttrTypes() map[string]attr.Type { + return enterpriseSearchResourceInfoSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} - "node_type_worker": { - Type: schema.TypeBool, - Computed: true, - }, +func enterpriseSearchTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Node topology element definition.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Description: "Controls the allocation of this topology element as well as allowed sizes and node_types. It needs to match the ID of an existing instance configuration.", + Computed: true, + }, + "size": { + Type: types.StringType, + Description: `Amount of "size_resource" in Gigabytes. For example "4g".`, + Computed: true, + }, + "size_resource": { + Type: types.StringType, + Description: "Type of resource (\"memory\" or \"storage\")", + Computed: true, + }, + "zone_count": { + Type: types.Int64Type, + Description: "Number of zones in which nodes will be placed.", + Computed: true, + }, + "node_type_appserver": { + Type: types.BoolType, + Description: "Defines whether this instance should run as application/API server.", + Computed: true, + }, + "node_type_connector": { + Type: types.BoolType, + Description: "Defines whether this instance should run as connector.", + Computed: true, }, - }, + "node_type_worker": { + Type: types.BoolType, + Description: "Defines whether this instance should run as background worker.", + Computed: true, + }, + }), } } + +func enterpriseSearchTopologyAttrTypes() map[string]attr.Type { + return enterpriseSearchTopologySchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +type enterpriseSearchResourceInfoModelV0 
struct { + ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` + Healthy types.Bool `tfsdk:"healthy"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + RefID types.String `tfsdk:"ref_id"` + ResourceID types.String `tfsdk:"resource_id"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` + Topology types.List `tfsdk:"topology"` //< enterpriseSearchTopologyModelV0 +} + +type enterpriseSearchTopologyModelV0 struct { + InstanceConfigurationID types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + NodeTypeAppserver types.Bool `tfsdk:"node_type_appserver"` + NodeTypeConnector types.Bool `tfsdk:"node_type_connector"` + NodeTypeWorker types.Bool `tfsdk:"node_type_worker"` +} diff --git a/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go b/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go index a9f8bfc6e..8701565f5 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go +++ b/ec/ecdatasource/deploymentdatasource/schema_integrations_server.go @@ -18,72 +18,116 @@ package deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newIntegrationsServerResourceInfo() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ +func integrationsServerResourceInfoSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Instance configuration of the Integrations Server type.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + 
Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "A locally-unique friendly alias for an Elasticsearch resource in this deployment.", + Computed: true, }, "healthy": { - Type: schema.TypeBool, - Computed: true, + Type: types.BoolType, + Description: "Resource kind health status.", + Computed: true, }, "http_endpoint": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "HTTP endpoint for the resource kind.", + Computed: true, }, "https_endpoint": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "HTTPS endpoint for the resource kind.", + Computed: true, }, "ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "A locally-unique friendly alias for this Integrations Server resource.", + Computed: true, }, "resource_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The resource unique identifier.", + Computed: true, }, "status": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Resource kind status (for example, \"started\", \"stopped\", etc).", + Computed: true, }, "version": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Elastic stack version.", + Computed: true, }, "topology": integrationsServerTopologySchema(), - }, + }), } } -func integrationsServerTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - }, - "size_resource": { - Type: schema.TypeString, - Computed: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - }, 
+func integrationsServerResourceInfoAttrTypes() map[string]attr.Type { + return integrationsServerResourceInfoSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +func integrationsServerTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Node topology element definition.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Description: "Controls the allocation of this topology element as well as allowed sizes and node_types. It needs to match the ID of an existing instance configuration.", + Computed: true, + }, + "size": { + Type: types.StringType, + Description: `Amount of "size_resource" in Gigabytes. For example "4g".`, + Computed: true, }, - }, + "size_resource": { + Type: types.StringType, + Description: "Type of resource (\"memory\" or \"storage\")", + Computed: true, + }, + "zone_count": { + Type: types.Int64Type, + Description: "Number of zones in which nodes will be placed.", + Computed: true, + }, + }), } } + +func integrationsServerTopologyAttrTypes() map[string]attr.Type { + return integrationsServerTopologySchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +type integrationsServerResourceInfoModelV0 struct { + ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` + Healthy types.Bool `tfsdk:"healthy"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + RefID types.String `tfsdk:"ref_id"` + ResourceID types.String `tfsdk:"resource_id"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` + Topology types.List `tfsdk:"topology"` //< integrationsServerTopologyModelV0 +} + +type integrationsServerTopologyModelV0 struct { + InstanceConfigurationID types.String 
`tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` +} diff --git a/ec/ecdatasource/deploymentdatasource/schema_kibana.go b/ec/ecdatasource/deploymentdatasource/schema_kibana.go index b0ef4bce1..35ebee7f2 100644 --- a/ec/ecdatasource/deploymentdatasource/schema_kibana.go +++ b/ec/ecdatasource/deploymentdatasource/schema_kibana.go @@ -18,72 +18,116 @@ package deploymentdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newKibanaResourceInfo() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ +func kibanaResourceInfoSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Instance configuration of the Kibana type.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "A locally-unique friendly alias for an Elasticsearch resource in this deployment.", + Computed: true, }, "healthy": { - Type: schema.TypeBool, - Computed: true, + Type: types.BoolType, + Description: "Kibana resource health status.", + Computed: true, }, "http_endpoint": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "HTTP endpoint for the Kibana resource.", + Computed: true, }, "https_endpoint": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "HTTPS endpoint for the Kibana resource.", + Computed: true, }, "ref_id": { - Type: schema.TypeString, - Computed: 
true, + Type: types.StringType, + Description: "A locally-unique friendly alias for this Kibana resource.", + Computed: true, }, "resource_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The resource unique identifier.", + Computed: true, }, "status": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Kibana resource status (for example, \"started\", \"stopped\", etc).", + Computed: true, }, "version": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Elastic stack version.", + Computed: true, }, "topology": kibanaTopologySchema(), - }, + }), } } -func kibanaTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - }, - "size_resource": { - Type: schema.TypeString, - Computed: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - }, +func kibanaResourceInfoAttrTypes() map[string]attr.Type { + return kibanaResourceInfoSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +func kibanaTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Node topology element definition.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Description: "Controls the allocation of this topology element as well as allowed sizes and node_types. It needs to match the ID of an existing instance configuration.", + Computed: true, + }, + "size": { + Type: types.StringType, + Description: "Amount of size_resource in Gigabytes. 
For example \"4g\".", + Computed: true, }, - }, + "size_resource": { + Type: types.StringType, + Description: "Type of resource (\"memory\" or \"storage\")", + Computed: true, + }, + "zone_count": { + Type: types.Int64Type, + Description: "Number of zones in which nodes will be placed.", + Computed: true, + }, + }), } } + +func kibanaTopologyAttrTypes() map[string]attr.Type { + return kibanaTopologySchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +type kibanaResourceInfoModelV0 struct { + ElasticsearchClusterRefID types.String `tfsdk:"elasticsearch_cluster_ref_id"` + Healthy types.Bool `tfsdk:"healthy"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + RefID types.String `tfsdk:"ref_id"` + ResourceID types.String `tfsdk:"resource_id"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` + Topology types.List `tfsdk:"topology"` //< kibanaTopologyModelV0 +} + +type kibanaTopologyModelV0 struct { + InstanceConfigurationID types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` +} diff --git a/ec/ecdatasource/deploymentdatasource/schema_observability.go b/ec/ecdatasource/deploymentdatasource/schema_observability.go new file mode 100644 index 000000000..c4acf4471 --- /dev/null +++ b/ec/ecdatasource/deploymentdatasource/schema_observability.go @@ -0,0 +1,66 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package deploymentdatasource + +import ( + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func observabilitySettingsSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Observability settings. Information about logs and metrics shipped to a dedicated deployment.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "deployment_id": { + Type: types.StringType, + Description: "Destination deployment ID for the shipped logs and monitoring metrics.", + Computed: true, + }, + "ref_id": { + Type: types.StringType, + Description: "Elasticsearch resource kind ref_id of the destination deployment.", + Computed: true, + }, + "logs": { + Type: types.BoolType, + Description: "Defines whether logs are shipped to the destination deployment.", + Computed: true, + }, + "metrics": { + Type: types.BoolType, + Description: "Defines whether metrics are shipped to the destination deployment.", + Computed: true, + }, + }), + } +} + +func observabilitySettingsAttrTypes() map[string]attr.Type { + return observabilitySettingsSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +type observabilitySettingsModel struct { + DeploymentID types.String `tfsdk:"deployment_id"` + RefID types.String `tfsdk:"ref_id"` + 
Logs types.Bool `tfsdk:"logs"` + Metrics types.Bool `tfsdk:"metrics"` +} diff --git a/ec/ecdatasource/deploymentsdatasource/datasource.go b/ec/ecdatasource/deploymentsdatasource/datasource.go index 23eabdd1b..79f4df5b7 100644 --- a/ec/ecdatasource/deploymentsdatasource/datasource.go +++ b/ec/ecdatasource/deploymentsdatasource/datasource.go @@ -19,108 +19,136 @@ package deploymentsdatasource import ( "context" + "fmt" "strconv" - "time" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/elastic/terraform-provider-ec/ec/internal" ) -// DataSource returns the ec_deployments data source schema. -func DataSource() *schema.Resource { - return &schema.Resource{ - ReadContext: read, +var _ datasource.DataSource = &DataSource{} +var _ datasource.DataSourceWithConfigure = &DataSource{} + +type DataSource struct { + client *api.API +} - Schema: newSchema(), +func (d *DataSource) Configure(ctx context.Context, request datasource.ConfigureRequest, response *datasource.ConfigureResponse) { + client, diags := internal.ConvertProviderData(request.ProviderData) + response.Diagnostics.Append(diags...) 
+ d.client = client +} - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(5 * time.Minute), - }, - } +func (d *DataSource) Metadata(ctx context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { + response.TypeName = request.ProviderTypeName + "_deployments" } -func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) +func (d DataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + // Prevent panic if the provider has not been configured. + if d.client == nil { + response.Diagnostics.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) - query, err := expandFilters(d) - if err != nil { - return diag.FromErr(err) + return + } + + var newState modelV0 + response.Diagnostics.Append(request.Config.Get(ctx, &newState)...) + if response.Diagnostics.HasError() { + return + } + + query, diags := expandFilters(ctx, newState) + response.Diagnostics.Append(diags...) + if diags.HasError() { + return } res, err := deploymentapi.Search(deploymentapi.SearchParams{ - API: client, + API: d.client, Request: query, }) if err != nil { - return diag.FromErr(multierror.NewPrefixed("failed searching deployments", err)) + response.Diagnostics.AddError( + "Failed searching deployments", + fmt.Sprintf("Failed searching deployments version: %s", err), + ) + return } - if err := modelToState(d, res); err != nil { - return diag.FromErr(err) + response.Diagnostics.Append(modelToState(ctx, res, &newState)...) + if response.Diagnostics.HasError() { + return } - return nil + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) 
} -func modelToState(d *schema.ResourceData, res *models.DeploymentsSearchResponse) error { - if d.Id() == "" { - if b, _ := res.MarshalBinary(); len(b) > 0 { - d.SetId(strconv.Itoa(schema.HashString(string(b)))) - } - } +func modelToState(ctx context.Context, res *models.DeploymentsSearchResponse, state *modelV0) diag.Diagnostics { + var diags diag.Diagnostics - if err := d.Set("return_count", res.ReturnCount); err != nil { - return err + if b, _ := res.MarshalBinary(); len(b) > 0 { + state.ID = types.String{Value: strconv.Itoa(schema.HashString(string(b)))} } + state.ReturnCount = types.Int64{Value: int64(*res.ReturnCount)} - var result = make([]interface{}, 0, len(res.Deployments)) + var result = make([]deploymentModelV0, 0, len(res.Deployments)) for _, deployment := range res.Deployments { - var m = make(map[string]interface{}) + var m deploymentModelV0 - m["deployment_id"] = *deployment.ID - m["alias"] = deployment.Alias + m.DeploymentID = types.String{Value: *deployment.ID} + m.Alias = types.String{Value: deployment.Alias} if deployment.Name != nil { - m["name"] = deployment.Name + m.Name = types.String{Value: *deployment.Name} } if len(deployment.Resources.Elasticsearch) > 0 { - m["elasticsearch_resource_id"] = *deployment.Resources.Elasticsearch[0].ID - m["elasticsearch_ref_id"] = *deployment.Resources.Elasticsearch[0].RefID + m.ElasticsearchResourceID = types.String{Value: *deployment.Resources.Elasticsearch[0].ID} + m.ElasticsearchRefID = types.String{Value: *deployment.Resources.Elasticsearch[0].RefID} } if len(deployment.Resources.Kibana) > 0 { - m["kibana_resource_id"] = *deployment.Resources.Kibana[0].ID - m["kibana_ref_id"] = *deployment.Resources.Kibana[0].RefID + m.KibanaResourceID = types.String{Value: *deployment.Resources.Kibana[0].ID} + m.KibanaRefID = types.String{Value: *deployment.Resources.Kibana[0].RefID} } if len(deployment.Resources.Apm) > 0 { - m["apm_resource_id"] = *deployment.Resources.Apm[0].ID - m["apm_ref_id"] = 
*deployment.Resources.Apm[0].RefID + m.ApmResourceID = types.String{Value: *deployment.Resources.Apm[0].ID} + m.ApmRefID = types.String{Value: *deployment.Resources.Apm[0].RefID} } if len(deployment.Resources.IntegrationsServer) > 0 { - m["integrations_server_resource_id"] = *deployment.Resources.IntegrationsServer[0].ID - m["integrations_server_ref_id"] = *deployment.Resources.IntegrationsServer[0].RefID + m.IntegrationsServerResourceID = types.String{Value: *deployment.Resources.IntegrationsServer[0].ID} + m.IntegrationsServerRefID = types.String{Value: *deployment.Resources.IntegrationsServer[0].RefID} } if len(deployment.Resources.EnterpriseSearch) > 0 { - m["enterprise_search_resource_id"] = *deployment.Resources.EnterpriseSearch[0].ID - m["enterprise_search_ref_id"] = *deployment.Resources.EnterpriseSearch[0].RefID + m.EnterpriseSearchResourceID = types.String{Value: *deployment.Resources.EnterpriseSearch[0].ID} + m.EnterpriseSearchRefID = types.String{Value: *deployment.Resources.EnterpriseSearch[0].RefID} } result = append(result, m) - if len(result) > 0 { - if err := d.Set("deployments", result); err != nil { - return err - } - } } - return nil + diags.Append(tfsdk.ValueFrom(ctx, result, types.ListType{ + ElemType: types.ObjectType{ + AttrTypes: deploymentAttrTypes(), + }, + }, &state.Deployments)...) 
+ + return diags } diff --git a/ec/ecdatasource/deploymentsdatasource/datasource_test.go b/ec/ecdatasource/deploymentsdatasource/datasource_test.go index 417edf53e..23b4e2db4 100644 --- a/ec/ecdatasource/deploymentsdatasource/datasource_test.go +++ b/ec/ecdatasource/deploymentsdatasource/datasource_test.go @@ -18,47 +18,54 @@ package deploymentsdatasource import ( + "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) func Test_modelToState(t *testing.T) { - deploymentsSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - deploymentsSchemaArg.SetId("myID") - _ = deploymentsSchemaArg.Set("name_prefix", "test") - _ = deploymentsSchemaArg.Set("healthy", "true") - _ = deploymentsSchemaArg.Set("deployment_template_id", "azure-compute-optimized") + state := modelV0{ + ID: types.String{Value: "test"}, + NamePrefix: types.String{Value: "test"}, + Healthy: types.String{Value: "true"}, + DeploymentTemplateID: types.String{Value: "azure-compute-optimized"}, + } - wantDeployments := util.NewResourceData(t, util.ResDataParams{ - ID: "myID", - State: map[string]interface{}{ - "id": "myID", - "name_prefix": "test", - "return_count": 1, - "deployment_template_id": "azure-compute-optimized", - "healthy": "true", - "deployments": []interface{}{map[string]interface{}{ - "name": "test-hello", - "alias": "dev", - "apm_resource_id": "9884c76ae1cd4521a0d9918a454a700d", - "apm_ref_id": "apm", - "deployment_id": "a8f22a9b9e684a7f94a89df74aa14331", - "elasticsearch_resource_id": "a98dd0dac15a48d5b3953384c7e571b9", - "elasticsearch_ref_id": 
"elasticsearch", - "enterprise_search_resource_id": "f17e4d8a61b14c12b020d85b723357ba", - "enterprise_search_ref_id": "enterprise_search", - "kibana_resource_id": "c75297d672b54da68faecededf372f87", - "kibana_ref_id": "kibana", + wantDeployments := modelV0{ + ID: types.String{Value: "2705093922"}, + NamePrefix: types.String{Value: "test"}, + ReturnCount: types.Int64{Value: 1}, + DeploymentTemplateID: types.String{Value: "azure-compute-optimized"}, + Healthy: types.String{Value: "true"}, + Deployments: types.List{ + ElemType: types.ObjectType{AttrTypes: deploymentAttrTypes()}, + Elems: []attr.Value{types.Object{ + AttrTypes: deploymentAttrTypes(), + Attrs: map[string]attr.Value{ + "name": types.String{Value: "test-hello"}, + "alias": types.String{Value: "dev"}, + "apm_resource_id": types.String{Value: "9884c76ae1cd4521a0d9918a454a700d"}, + "apm_ref_id": types.String{Value: "apm"}, + "deployment_id": types.String{Value: "a8f22a9b9e684a7f94a89df74aa14331"}, + "elasticsearch_resource_id": types.String{Value: "a98dd0dac15a48d5b3953384c7e571b9"}, + "elasticsearch_ref_id": types.String{Value: "elasticsearch"}, + "enterprise_search_resource_id": types.String{Value: "f17e4d8a61b14c12b020d85b723357ba"}, + "enterprise_search_ref_id": types.String{Value: "enterprise_search"}, + "kibana_resource_id": types.String{Value: "c75297d672b54da68faecededf372f87"}, + "kibana_ref_id": types.String{Value: "kibana"}, + "integrations_server_resource_id": types.String{Value: "3b3025a012fd3dd5c9dcae2a1ac89c6f"}, + "integrations_server_ref_id": types.String{Value: "integrations_server"}, + }, }}, }, - Schema: newSchema(), - }) + } searchResponse := &models.DeploymentsSearchResponse{ ReturnCount: ec.Int32(1), @@ -105,79 +112,47 @@ func Test_modelToState(t *testing.T) { RefID: ec.String("enterprise_search"), }, }, + IntegrationsServer: []*models.IntegrationsServerResourceInfo{ + { + ID: ec.String("3b3025a012fd3dd5c9dcae2a1ac89c6f"), + RefID: ec.String("integrations_server"), + }, + }, }, }, }, } 
- deploymentsSchemaArgNoID := schema.TestResourceDataRaw(t, newSchema(), nil) - deploymentsSchemaArgNoID.SetId("") - _ = deploymentsSchemaArgNoID.Set("name_prefix", "test") - _ = deploymentsSchemaArgNoID.Set("healthy", "true") - _ = deploymentsSchemaArgNoID.Set("deployment_template_id", "azure-compute-optimized") - - wantDeploymentsNoID := util.NewResourceData(t, util.ResDataParams{ - ID: "3825846481", - State: map[string]interface{}{ - "id": "myID", - "name_prefix": "test", - "return_count": 1, - "deployment_template_id": "azure-compute-optimized", - "healthy": "true", - "deployments": []interface{}{map[string]interface{}{ - "name": "test-hello", - "alias": "dev", - "apm_resource_id": "9884c76ae1cd4521a0d9918a454a700d", - "apm_ref_id": "apm", - "deployment_id": "a8f22a9b9e684a7f94a89df74aa14331", - "elasticsearch_resource_id": "a98dd0dac15a48d5b3953384c7e571b9", - "elasticsearch_ref_id": "elasticsearch", - "enterprise_search_resource_id": "f17e4d8a61b14c12b020d85b723357ba", - "enterprise_search_ref_id": "enterprise_search", - "kibana_resource_id": "c75297d672b54da68faecededf372f87", - "kibana_ref_id": "kibana", - }}, - }, - Schema: newSchema(), - }) - type args struct { - d *schema.ResourceData - res *models.DeploymentsSearchResponse + state modelV0 + res *models.DeploymentsSearchResponse } tests := []struct { - name string - args args - want *schema.ResourceData - err error + name string + args args + want modelV0 + diags error }{ { name: "flattens deployment resources", want: wantDeployments, args: args{ - d: deploymentsSchemaArg, - res: searchResponse, - }, - }, - { - name: "flattens deployment resources and sets the ID", - args: args{ - d: deploymentsSchemaArgNoID, - res: searchResponse, + state: state, + res: searchResponse, }, - want: wantDeploymentsNoID, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := modelToState(tt.args.d, tt.args.res) - if tt.err != nil || err != nil { - assert.EqualError(t, err, tt.err.Error()) + state = 
tt.args.state + diags := modelToState(context.Background(), tt.args.res, &state) + if tt.diags != nil { + assert.Equal(t, tt.diags, diags) } else { - assert.NoError(t, err) + assert.Empty(t, diags) } - assert.Equal(t, tt.want.State().Attributes, tt.args.d.State().Attributes) + assert.Equal(t, tt.want, state) }) } } diff --git a/ec/ecdatasource/deploymentsdatasource/expanders.go b/ec/ecdatasource/deploymentsdatasource/expanders.go index 01e5062a4..a1bfdeac4 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders.go @@ -18,29 +18,34 @@ package deploymentsdatasource import ( + "context" "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) // expandFilters expands all filters into a search request model -func expandFilters(d *schema.ResourceData) (*models.SearchRequest, error) { +func expandFilters(ctx context.Context, state modelV0) (*models.SearchRequest, diag.Diagnostics) { + var diags diag.Diagnostics var queries []*models.QueryContainer - namePrefix := d.Get("name_prefix").(string) + namePrefix := state.NamePrefix.Value if namePrefix != "" { queries = append(queries, &models.QueryContainer{ Prefix: map[string]models.PrefixQuery{ // The "keyword" addition denotes that the query will be using a keyword // field rather than a text field in order to ensure the query is not analyzed - "name.keyword": {Value: ec.String(namePrefix)}, + "name.keyword": {Value: &namePrefix}, }, }) } - depTemplateID := d.Get("deployment_template_id").(string) + depTemplateID := state.DeploymentTemplateID.Value if depTemplateID != "" { esPath := "resources.elasticsearch" tplTermPath := esPath + ".info.plan_info.current.plan.deployment_template.id" @@ -48,10 +53,12 
@@ func expandFilters(d *schema.ResourceData) (*models.SearchRequest, error) { queries = append(queries, newNestedTermQuery(esPath, tplTermPath, depTemplateID)) } - healthy := d.Get("healthy").(string) + healthy := state.Healthy.Value if healthy != "" { if healthy != "true" && healthy != "false" { - return nil, fmt.Errorf("invalid value for healthy (true|false): '%s'", healthy) + diags.AddError("invalid value for healthy", + fmt.Sprintf("expected either [true] or [false] but got [%s]", healthy)) + return nil, diags } queries = append(queries, &models.QueryContainer{ @@ -61,11 +68,16 @@ func expandFilters(d *schema.ResourceData) (*models.SearchRequest, error) { }) } - tags := d.Get("tags").(map[string]interface{}) + var tags = make(map[string]string) + diags.Append(state.Tags.ElementsAs(ctx, &tags, false)...) + if diags.HasError() { + return nil, diags + } + var tagQueries []*models.QueryContainer for key, value := range tags { tagQueries = append(tagQueries, - newNestedTagQuery(key, value.(string)), + newNestedTagQuery(key, value), ) } if len(tagQueries) > 0 { @@ -76,20 +88,29 @@ func expandFilters(d *schema.ResourceData) (*models.SearchRequest, error) { }, }) } + type resourceFilter struct { + resourceKind string + settings *types.List + } - validResourceKinds := []string{util.Elasticsearch, util.Kibana, - util.Apm, util.EnterpriseSearch, util.IntegrationsServer} + resourceFilters := []resourceFilter{ + {resourceKind: util.Elasticsearch, settings: &state.Elasticsearch}, + {resourceKind: util.Kibana, settings: &state.Kibana}, + {resourceKind: util.Apm, settings: &state.Apm}, + {resourceKind: util.EnterpriseSearch, settings: &state.EnterpriseSearch}, + {resourceKind: util.IntegrationsServer, settings: &state.IntegrationsServer}, + } - for _, resourceKind := range validResourceKinds { - req, err := expandResourceFilters(d.Get(resourceKind).([]interface{}), resourceKind) - if err != nil { - return nil, err + for _, filter := range resourceFilters { + req, diags := 
expandResourceFilters(ctx, filter.settings, filter.resourceKind) + if diags.HasError() { + return nil, diags } queries = append(queries, req...) } searchReq := models.SearchRequest{ - Size: int32(d.Get("size").(int)), + Size: int32(state.Size.Value), Sort: []interface{}{"id"}, } @@ -111,42 +132,45 @@ func expandFilters(d *schema.ResourceData) (*models.SearchRequest, error) { } // expandResourceFilters expands filters from a specific resource kind into query models -func expandResourceFilters(resources []interface{}, resourceKind string) ([]*models.QueryContainer, error) { - if len(resources) == 0 { +func expandResourceFilters(ctx context.Context, resources *types.List, resourceKind string) ([]*models.QueryContainer, diag.Diagnostics) { + var diags diag.Diagnostics + if len(resources.Elems) == 0 { return nil, nil } - + var filters []resourceFiltersModelV0 var queries []*models.QueryContainer - - for _, raw := range resources { - var q = raw.(map[string]interface{}) - + diags.Append(resources.ElementsAs(ctx, &filters, false)...) + if diags.HasError() { + return nil, diags + } + for _, filter := range filters { resourceKindPath := "resources." + resourceKind - if status, ok := q["status"].(string); ok && status != "" { + if filter.Status.Value != "" { statusTermPath := resourceKindPath + ".info.status" queries = append(queries, - newNestedTermQuery(resourceKindPath, statusTermPath, status)) + newNestedTermQuery(resourceKindPath, statusTermPath, filter.Status.Value)) } - if version, ok := q["version"].(string); ok && version != "" { + if filter.Version.Value != "" { versionTermPath := resourceKindPath + ".info.plan_info.current.plan." 
+ resourceKind + ".version" queries = append(queries, - newNestedTermQuery(resourceKindPath, versionTermPath, version)) + newNestedTermQuery(resourceKindPath, versionTermPath, filter.Version.Value)) } - if healthy, ok := q["healthy"].(string); ok && healthy != "" { + if filter.Healthy.Value != "" { healthyTermPath := resourceKindPath + ".info.healthy" - if healthy != "true" && healthy != "false" { - return nil, fmt.Errorf("invalid value for healthy (true|false): '%s'", healthy) + if filter.Healthy.Value != "true" && filter.Healthy.Value != "false" { + diags.AddError("invalid value for healthy", fmt.Sprintf("expected either [true] or [false] but got [%s]", filter.Healthy.Value)) + return nil, diags } queries = append(queries, - newNestedTermQuery(resourceKindPath, healthyTermPath, healthy)) + newNestedTermQuery(resourceKindPath, healthyTermPath, filter.Healthy.Value)) } } @@ -156,11 +180,11 @@ func expandResourceFilters(resources []interface{}, resourceKind string) ([]*mod func newNestedTermQuery(path, term string, value string) *models.QueryContainer { return &models.QueryContainer{ Nested: &models.NestedQuery{ - Path: ec.String(path), + Path: &path, Query: &models.QueryContainer{ Term: map[string]models.TermQuery{ term: { - Value: ec.String(value), + Value: &value, }, }, }, diff --git a/ec/ecdatasource/deploymentsdatasource/expanders_test.go b/ec/ecdatasource/deploymentsdatasource/expanders_test.go index 6410e1f4f..41ffc4851 100644 --- a/ec/ecdatasource/deploymentsdatasource/expanders_test.go +++ b/ec/ecdatasource/deploymentsdatasource/expanders_test.go @@ -18,41 +18,35 @@ package deploymentsdatasource import ( + "context" "encoding/json" - "errors" "testing" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - 
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_expandFilters(t *testing.T) { - deploymentsDS := util.NewResourceData(t, util.ResDataParams{ - ID: "myID", - State: newSampleFilters(), - Schema: newSchema(), - }) - invalidDS := util.NewResourceData(t, util.ResDataParams{ - ID: "myID", - State: newInvalidFilters(), - Schema: newSchema(), - }) type args struct { - d *schema.ResourceData + state modelV0 } tests := []struct { - name string - args args - want *models.SearchRequest - err error + name string + args args + want *models.SearchRequest + diags diag.Diagnostics }{ { name: "parses the data source", - args: args{d: deploymentsDS}, + args: args{state: newSampleFilters()}, want: &models.SearchRequest{ Size: 100, Sort: []interface{}{"id"}, @@ -71,38 +65,58 @@ func Test_expandFilters(t *testing.T) { }, { name: "parses the data source with a different size", - args: args{d: util.NewResourceData(t, util.ResDataParams{ - ID: "myID", - Schema: newSchema(), - State: map[string]interface{}{ - "name_prefix": "test", - "healthy": "true", - "size": 200, - "tags": map[string]interface{}{ - "foo": "bar", - }, - "elasticsearch": []interface{}{ - map[string]interface{}{ - "version": "7.9.1", - }, + args: args{ + state: modelV0{ + NamePrefix: types.String{Value: "test"}, + Healthy: types.String{Value: "true"}, + Size: types.Int64{Value: 200}, + Tags: util.StringMapAsType(map[string]string{"foo": "bar"}), + Elasticsearch: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.ElasticsearchResourceKind)}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(util.ElasticsearchResourceKind), + Attrs: map[string]attr.Value{ + "healthy": types.String{Null: true}, + "status": types.String{Null: true}, + "version": types.String{Value: "7.9.1"}, + }, + }}, }, - "kibana": []interface{}{ - map[string]interface{}{ - 
"status": "started", - }, + Kibana: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.KibanaResourceKind)}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(util.KibanaResourceKind), + Attrs: map[string]attr.Value{ + "healthy": types.String{Null: true}, + "status": types.String{Value: "started"}, + "version": types.String{Null: true}, + }, + }}, }, - "apm": []interface{}{ - map[string]interface{}{ - "healthy": "true", - }, + Apm: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind)}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind), + Attrs: map[string]attr.Value{ + "healthy": types.String{Value: "true"}, + "status": types.String{Null: true}, + "version": types.String{Null: true}, + }, + }}, }, - "enterprise_search": []interface{}{ - map[string]interface{}{ - "healthy": "false", - }, + EnterpriseSearch: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.EnterpriseSearchResourceKind)}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(util.EnterpriseSearchResourceKind), + Attrs: map[string]attr.Value{ + "status": types.String{Null: true}, + "healthy": types.String{Value: "false"}, + "version": types.String{Null: true}, + }, + }}, }, }, - })}, + }, want: &models.SearchRequest{ Size: 200, Sort: []interface{}{"id"}, @@ -120,18 +134,18 @@ func Test_expandFilters(t *testing.T) { }, }, { - name: "fails to parse the data source", - args: args{d: invalidDS}, - err: errors.New("invalid value for healthy (true|false): 'invalid value'"), + name: "fails to parse the data source", + args: args{state: newInvalidFilters()}, + diags: diag.Diagnostics{diag.NewErrorDiagnostic("invalid value for healthy", "expected either [true] or [false] but got [invalid value]")}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := expandFilters(tt.args.d) - if tt.err 
!= nil || err != nil { - assert.EqualError(t, err, tt.err.Error()) + got, diags := expandFilters(context.Background(), tt.args.state) + if tt.diags != nil { + assert.Equal(t, tt.diags, diags) } else { - assert.NoError(t, err) + assert.Empty(t, diags) } jsonWant, err := json.MarshalIndent(tt.want, "", " ") @@ -149,43 +163,72 @@ func Test_expandFilters(t *testing.T) { } } -func newInvalidFilters() map[string]interface{} { - return map[string]interface{}{ - "healthy": "invalid value", - "apm": []interface{}{ - map[string]interface{}{ - "healthy": "invalid value", - }, +func newInvalidFilters() modelV0 { + return modelV0{ + Healthy: types.String{Value: "invalid value"}, + Apm: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind)}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind), + Attrs: map[string]attr.Value{ + "healthy": types.String{Value: "invalid value"}, + }, + }}, }, } } -func newSampleFilters() map[string]interface{} { - return map[string]interface{}{ - "name_prefix": "test", - "healthy": "true", - "tags": map[string]interface{}{ - "foo": "bar", - }, - "elasticsearch": []interface{}{ - map[string]interface{}{ - "version": "7.9.1", - }, +func newSampleFilters() modelV0 { + return modelV0{ + NamePrefix: types.String{Value: "test"}, + Healthy: types.String{Value: "true"}, + Size: types.Int64{Value: 100}, + Tags: types.Map{ElemType: types.StringType, Elems: map[string]attr.Value{ + "foo": types.String{Value: "bar"}, + }}, + Elasticsearch: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.ElasticsearchResourceKind)}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(util.ElasticsearchResourceKind), + Attrs: map[string]attr.Value{ + "healthy": types.String{Null: true}, + "status": types.String{Null: true}, + "version": types.String{Value: "7.9.1"}, + }, + }}, }, - "kibana": []interface{}{ - map[string]interface{}{ - 
"status": "started", - }, + Kibana: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.KibanaResourceKind)}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(util.KibanaResourceKind), + Attrs: map[string]attr.Value{ + "healthy": types.String{Null: true}, + "status": types.String{Value: "started"}, + "version": types.String{Null: true}, + }, + }}, }, - "apm": []interface{}{ - map[string]interface{}{ - "healthy": "true", - }, + Apm: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind)}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(util.ApmResourceKind), + Attrs: map[string]attr.Value{ + "healthy": types.String{Value: "true"}, + "status": types.String{Null: true}, + "version": types.String{Null: true}, + }, + }}, }, - "enterprise_search": []interface{}{ - map[string]interface{}{ - "healthy": "false", - }, + EnterpriseSearch: types.List{ + ElemType: types.ObjectType{AttrTypes: resourceFiltersAttrTypes(util.EnterpriseSearchResourceKind)}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceFiltersAttrTypes(util.EnterpriseSearchResourceKind), + Attrs: map[string]attr.Value{ + "status": types.String{Null: true}, + "healthy": types.String{Value: "false"}, + "version": types.String{Null: true}, + }, + }}, }, } } diff --git a/ec/ecdatasource/deploymentsdatasource/schema.go b/ec/ecdatasource/deploymentsdatasource/schema.go index 8504e9676..52e9fd724 100644 --- a/ec/ecdatasource/deploymentsdatasource/schema.go +++ b/ec/ecdatasource/deploymentsdatasource/schema.go @@ -17,154 +17,215 @@ package deploymentsdatasource -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +import ( + "context" + "fmt" -func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "name_prefix": { - Type: schema.TypeString, - Optional: true, - }, - "healthy": { - Type: schema.TypeString, - Optional: true, - }, - 
"deployment_template_id": { - Type: schema.TypeString, - Optional: true, - }, - "tags": { - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - "size": { - Type: schema.TypeInt, - Optional: true, - Default: 100, - }, + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" - // Computed - "return_count": { - Type: schema.TypeInt, - Computed: true, - }, - "deployments": { - Type: schema.TypeList, - Computed: true, - Elem: newDeploymentList(), - }, + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) - // Deployment resources - "elasticsearch": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: newResourceFilters(), - }, - "kibana": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: newResourceFilters(), - }, - "apm": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: newResourceFilters(), - }, - "integrations_server": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: newResourceFilters(), +func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + Attributes: map[string]tfsdk.Attribute{ + "name_prefix": { + Type: types.StringType, + Description: "Prefix to filter the returned deployment list by.", + Optional: true, + }, + "healthy": { + Type: types.StringType, + Description: "Filter the result set by their health status.", + Optional: true, + }, + "deployment_template_id": { + Type: types.StringType, + Description: "Filter the result set by the ID of the deployment template the deployment is based off.", + Optional: true, + }, + "tags": { + Type: types.MapType{ElemType: types.StringType}, + Description: "Filter the result set by their assigned 
tags.", + Optional: true, + }, + "size": { + Type: types.Int64Type, + Description: "The maximum number of deployments to return. Defaults to 100.", + MarkdownDescription: "The maximum number of deployments to return. Defaults to `100`.", + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Int64{Value: 100}), + }, + }, + + // Computed + "id": { + Type: types.StringType, + Computed: true, + MarkdownDescription: "Unique identifier of this data source.", + }, + "return_count": { + Type: types.Int64Type, + Description: "The number of deployments actually returned.", + Computed: true, + }, + "deployments": deploymentsListSchema(), }, - "enterprise_search": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - Elem: newResourceFilters(), + Blocks: map[string]tfsdk.Block{ + // Deployment resources + "elasticsearch": resourceFiltersSchema(util.ElasticsearchResourceKind), + "kibana": resourceFiltersSchema(util.KibanaResourceKind), + "apm": resourceFiltersSchema(util.ApmResourceKind), + "integrations_server": resourceFiltersSchema(util.IntegrationsServerResourceKind), + "enterprise_search": resourceFiltersSchema(util.EnterpriseSearchResourceKind), }, - } + }, nil } -func newDeploymentList() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ +func deploymentsListSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "List of deployments which match the specified query.", + Computed: true, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "deployment_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The deployment unique ID.", + Computed: true, }, "name": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The name of the deployment.", + Computed: true, }, "alias": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Deployment alias.", + 
Computed: true, }, "elasticsearch_resource_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The Elasticsearch resource unique ID.", + Computed: true, }, "elasticsearch_ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The Elasticsearch resource reference.", + Computed: true, }, "kibana_resource_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The Kibana resource unique ID.", + Computed: true, }, "kibana_ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The Kibana resource reference.", + Computed: true, }, "apm_resource_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The APM resource unique ID.", + Computed: true, }, "apm_ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The APM resource reference.", + Computed: true, }, "integrations_server_resource_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The Integrations Server resource unique ID.", + Computed: true, }, "integrations_server_ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The Integrations Server resource reference.", + Computed: true, }, "enterprise_search_resource_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The Enterprise Search resource unique ID.", + Computed: true, }, "enterprise_search_ref_id": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "The Enterprise Search resource reference.", + Computed: true, }, - }, + }), } } -func newResourceFilters() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ +func deploymentAttrTypes() map[string]attr.Type { + return 
deploymentsListSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes + +} + +func resourceFiltersSchema(resourceKind util.ResourceKind) tfsdk.Block { + return tfsdk.Block{ + Description: fmt.Sprintf("Filter by %s resource kind status or configuration.", resourceKind.Name()), + NestingMode: tfsdk.BlockNestingModeList, + Attributes: map[string]tfsdk.Attribute{ "healthy": { - Type: schema.TypeString, + Type: types.StringType, Optional: true, }, "status": { - Type: schema.TypeString, + Type: types.StringType, Optional: true, }, "version": { - Type: schema.TypeString, + Type: types.StringType, Optional: true, }, }, } } + +func resourceFiltersAttrTypes(resourceKind util.ResourceKind) map[string]attr.Type { + return resourceFiltersSchema(resourceKind).Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes + +} + +type modelV0 struct { + ID types.String `tfsdk:"id"` + NamePrefix types.String `tfsdk:"name_prefix"` + Healthy types.String `tfsdk:"healthy"` + DeploymentTemplateID types.String `tfsdk:"deployment_template_id"` + Tags types.Map `tfsdk:"tags"` + Size types.Int64 `tfsdk:"size"` + ReturnCount types.Int64 `tfsdk:"return_count"` + Deployments types.List `tfsdk:"deployments"` //< deploymentModelV0 + Elasticsearch types.List `tfsdk:"elasticsearch"` //< resourceFiltersModelV0 + Kibana types.List `tfsdk:"kibana"` //< resourceFiltersModelV0 + Apm types.List `tfsdk:"apm"` //< resourceFiltersModelV0 + IntegrationsServer types.List `tfsdk:"integrations_server"` //< resourceFiltersModelV0 + EnterpriseSearch types.List `tfsdk:"enterprise_search"` //< resourceFiltersModelV0 +} + +type deploymentModelV0 struct { + DeploymentID types.String `tfsdk:"deployment_id"` + Name types.String `tfsdk:"name"` + Alias types.String `tfsdk:"alias"` + ElasticsearchResourceID types.String `tfsdk:"elasticsearch_resource_id"` + ElasticsearchRefID types.String `tfsdk:"elasticsearch_ref_id"` + KibanaResourceID types.String `tfsdk:"kibana_resource_id"` + KibanaRefID 
types.String `tfsdk:"kibana_ref_id"` + ApmResourceID types.String `tfsdk:"apm_resource_id"` + ApmRefID types.String `tfsdk:"apm_ref_id"` + IntegrationsServerResourceID types.String `tfsdk:"integrations_server_resource_id"` + IntegrationsServerRefID types.String `tfsdk:"integrations_server_ref_id"` + EnterpriseSearchResourceID types.String `tfsdk:"enterprise_search_resource_id"` + EnterpriseSearchRefID types.String `tfsdk:"enterprise_search_ref_id"` +} + +type resourceFiltersModelV0 struct { + Healthy types.String `tfsdk:"healthy"` + Status types.String `tfsdk:"status"` + Version types.String `tfsdk:"version"` +} diff --git a/ec/ecdatasource/stackdatasource/datasource.go b/ec/ecdatasource/stackdatasource/datasource.go index c84bd1861..b4e9aefd4 100644 --- a/ec/ecdatasource/stackdatasource/datasource.go +++ b/ec/ecdatasource/stackdatasource/datasource.go @@ -21,61 +21,113 @@ import ( "context" "fmt" "regexp" - "strconv" - "time" + + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/stackapi" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + + "github.com/elastic/terraform-provider-ec/ec/internal" ) -// DataSource returns the ec_deployment data source schema. 
-func DataSource() *schema.Resource { - return &schema.Resource{ - ReadContext: read, +var _ datasource.DataSource = &DataSource{} +var _ datasource.DataSourceWithConfigure = &DataSource{} - Schema: newSchema(), +type DataSource struct { + client *api.API +} - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(5 * time.Minute), - }, - } +func (d *DataSource) Configure(ctx context.Context, request datasource.ConfigureRequest, response *datasource.ConfigureResponse) { + client, diags := internal.ConvertProviderData(request.ProviderData) + response.Diagnostics.Append(diags...) + d.client = client } -func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) - region := d.Get("region").(string) +func (d *DataSource) Metadata(ctx context.Context, request datasource.MetadataRequest, response *datasource.MetadataResponse) { + response.TypeName = request.ProviderTypeName + "_stack" +} + +func (d DataSource) Read(ctx context.Context, request datasource.ReadRequest, response *datasource.ReadResponse) { + // Prevent panic if the provider has not been configured. + if d.client == nil { + response.Diagnostics.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) + + return + } + + var newState modelV0 + response.Diagnostics.Append(request.Config.Get(ctx, &newState)...) 
+ if response.Diagnostics.HasError() { + return + } res, err := stackapi.List(stackapi.ListParams{ - API: client, - Region: region, + API: d.client, + Region: newState.Region.Value, }) if err != nil { - return diag.FromErr( - multierror.NewPrefixed("failed retrieving the specified stack version", err), + response.Diagnostics.AddError( + "Failed retrieving the specified stack version", + fmt.Sprintf("Failed retrieving the specified stack version: %s", err), ) + return } - versionExpr := d.Get("version_regex").(string) - version := d.Get("version").(string) - lock := d.Get("lock").(bool) - stack, err := stackFromFilters(versionExpr, version, lock, res.Stacks) + stack, err := stackFromFilters(newState.VersionRegex.Value, newState.Version.Value, newState.Lock.Value, res.Stacks) if err != nil { - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) + return + } + + response.Diagnostics.Append(modelToState(ctx, stack, &newState)...) + if response.Diagnostics.HasError() { + return } - if d.Id() == "" { - d.SetId(strconv.Itoa(schema.HashString(version))) + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) +} + +func modelToState(ctx context.Context, stack *models.StackVersionConfig, state *modelV0) diag.Diagnostics { + var diagnostics diag.Diagnostics + + state.ID = types.String{Value: stack.Version} + state.Version = types.String{Value: stack.Version} + if stack.Accessible != nil { + state.Accessible = types.Bool{Value: *stack.Accessible} } - if err := modelToState(d, stack); err != nil { - diag.FromErr(err) + state.MinUpgradableFrom = types.String{Value: stack.MinUpgradableFrom} + + if len(stack.UpgradableTo) > 0 { + diagnostics.Append(tfsdk.ValueFrom(ctx, stack.UpgradableTo, types.ListType{ElemType: types.StringType}, &state.UpgradableTo)...) 
} - return nil + if stack.Whitelisted != nil { + state.AllowListed = types.Bool{Value: *stack.Whitelisted} + } + + var diags diag.Diagnostics + state.Apm, diags = flattenApmConfig(ctx, stack.Apm) + diagnostics.Append(diags...) + + state.Elasticsearch, diags = flattenElasticsearchConfig(ctx, stack.Elasticsearch) + diagnostics.Append(diags...) + + state.EnterpriseSearch, diags = flattenEnterpriseSearchConfig(ctx, stack.EnterpriseSearch) + diagnostics.Append(diags...) + + state.Kibana, diags = flattenKibanaConfig(ctx, stack.Kibana) + diagnostics.Append(diags...) + + return diagnostics } func stackFromFilters(expr, version string, locked bool, stacks []*models.StackVersionConfig) (*models.StackVersionConfig, error) { @@ -103,52 +155,17 @@ func stackFromFilters(expr, version string, locked bool, stacks []*models.StackV ) } -func modelToState(d *schema.ResourceData, stack *models.StackVersionConfig) error { - if stack == nil { - return nil +func newElasticsearchConfigModelV0() elasticsearchConfigModelV0 { + return elasticsearchConfigModelV0{ + DenyList: types.List{ElemType: types.StringType}, + CompatibleNodeTypes: types.List{ElemType: types.StringType}, + Plugins: types.List{ElemType: types.StringType}, + DefaultPlugins: types.List{ElemType: types.StringType}, } - - if err := d.Set("version", stack.Version); err != nil { - return err - } - - if stack.Accessible != nil { - if err := d.Set("accessible", *stack.Accessible); err != nil { - return err - } - } - - if err := d.Set("min_upgradable_from", stack.MinUpgradableFrom); err != nil { - return err - } - - if len(stack.UpgradableTo) > 0 { - if err := d.Set("upgradable_to", stack.UpgradableTo); err != nil { - return err - } - } - - if stack.Whitelisted != nil { - if err := d.Set("allowlisted", *stack.Whitelisted); err != nil { - return err - } - } - - if err := d.Set("apm", flattenApmResources(stack.Apm)); err != nil { - return err - } - - if err := d.Set("elasticsearch", 
flattenElasticsearchResources(stack.Elasticsearch)); err != nil { - return err - } - - if err := d.Set("enterprise_search", flattenEnterpriseSearchResources(stack.EnterpriseSearch)); err != nil { - return err - } - - if err := d.Set("kibana", flattenKibanaResources(stack.Kibana)); err != nil { - return err +} +func newResourceKindConfigModelV0() resourceKindConfigModelV0 { + return resourceKindConfigModelV0{ + DenyList: types.List{ElemType: types.StringType}, + CompatibleNodeTypes: types.List{ElemType: types.StringType}, } - - return nil } diff --git a/ec/ecdatasource/stackdatasource/datasource_test.go b/ec/ecdatasource/stackdatasource/datasource_test.go index a86d71d56..17c7e345c 100644 --- a/ec/ecdatasource/stackdatasource/datasource_test.go +++ b/ec/ecdatasource/stackdatasource/datasource_test.go @@ -18,46 +18,44 @@ package stackdatasource import ( + "context" "errors" "fmt" "regexp/syntax" "testing" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_modelToState(t *testing.T) { - deploymentSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - deploymentSchemaArg.SetId("someid") - _ = deploymentSchemaArg.Set("region", "us-east-1") - _ = deploymentSchemaArg.Set("version_regex", "latest") - - wantDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: "someid", - State: newSampleStack(), - Schema: newSchema(), - }) + state := modelV0{ + Region: types.String{Value: "us-east-1"}, + VersionRegex: types.String{Value: "latest"}, + } type args struct { - d *schema.ResourceData - res *models.StackVersionConfig + state modelV0 + res *models.StackVersionConfig } tests := []struct { name 
string args args - want *schema.ResourceData + want modelV0 err error }{ { - name: "flattens deployment resources", - want: wantDeployment, + name: "flattens stack resources", + want: newSampleStack(), args: args{ - d: deploymentSchemaArg, + state: state, res: &models.StackVersionConfig{ Version: "7.9.1", Accessible: ec.Bool(true), @@ -117,68 +115,100 @@ func Test_modelToState(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := modelToState(tt.args.d, tt.args.res) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } + state = tt.args.state + diags := modelToState(context.Background(), tt.args.res, &state) + assert.Empty(t, diags) - assert.Equal(t, tt.want.State().Attributes, tt.args.d.State().Attributes) + assert.Equal(t, tt.want, state) }) } } -func newSampleStack() map[string]interface{} { - return map[string]interface{}{ - "id": "someid", - "region": "us-east-1", - "version_regex": "latest", - - "version": "7.9.1", - "accessible": true, - "allowlisted": true, - "min_upgradable_from": "6.8.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "default_plugins": []interface{}{"repository-s3"}, - "docker_image": "docker.elastic.co/cloud-assets/elasticsearch:7.9.1-0", - "plugins": []interface{}{ - "analysis-icu", - "analysis-kuromoji", - "analysis-nori", - "analysis-phonetic", - "analysis-smartcn", - "analysis-stempel", - "analysis-ukrainian", - "ingest-attachment", - "mapper-annotated-text", - "mapper-murmur3", - "mapper-size", - "repository-azure", - "repository-gcs", +func newSampleStack() modelV0 { + return modelV0{ + ID: types.String{Value: "7.9.1"}, + Region: types.String{Value: "us-east-1"}, + Version: types.String{Value: "7.9.1"}, + VersionRegex: types.String{Value: "latest"}, + Accessible: types.Bool{Value: true}, + AllowListed: types.Bool{Value: true}, 
+ MinUpgradableFrom: types.String{Value: "6.8.0"}, + Elasticsearch: types.List{ + ElemType: types.ObjectType{ + AttrTypes: elasticsearchConfigAttrTypes(), + }, + Elems: []attr.Value{types.Object{ + AttrTypes: elasticsearchConfigAttrTypes(), + Attrs: map[string]attr.Value{ + "denylist": util.StringListAsType([]string{"some"}), + "capacity_constraints_max": types.Int64{Value: 8192}, + "capacity_constraints_min": types.Int64{Value: 512}, + "compatible_node_types": util.StringListAsType(nil), + "docker_image": types.String{Value: "docker.elastic.co/cloud-assets/elasticsearch:7.9.1-0"}, + "plugins": util.StringListAsType([]string{ + "analysis-icu", + "analysis-kuromoji", + "analysis-nori", + "analysis-phonetic", + "analysis-smartcn", + "analysis-stempel", + "analysis-ukrainian", + "ingest-attachment", + "mapper-annotated-text", + "mapper-murmur3", + "mapper-size", + "repository-azure", + "repository-gcs", + }), + "default_plugins": util.StringListAsType([]string{"repository-s3"}), + }, + }}, + }, + Kibana: types.List{ + ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(util.KibanaResourceKind), }, - }}, - "kibana": []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": "docker.elastic.co/cloud-assets/kibana:7.9.1-0", - }}, - "apm": []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": "docker.elastic.co/cloud-assets/apm:7.9.1-0", - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": "docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0", - }}, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceKindConfigAttrTypes(util.KibanaResourceKind), + Attrs: map[string]attr.Value{ + "denylist": 
util.StringListAsType([]string{"some"}), + "capacity_constraints_max": types.Int64{Value: 8192}, + "capacity_constraints_min": types.Int64{Value: 512}, + "compatible_node_types": util.StringListAsType(nil), + "docker_image": types.String{Value: "docker.elastic.co/cloud-assets/kibana:7.9.1-0"}, + }, + }}, + }, + EnterpriseSearch: types.List{ + ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(util.EnterpriseSearchResourceKind), + }, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceKindConfigAttrTypes(util.EnterpriseSearchResourceKind), + Attrs: map[string]attr.Value{ + "denylist": util.StringListAsType([]string{"some"}), + "capacity_constraints_max": types.Int64{Value: 8192}, + "capacity_constraints_min": types.Int64{Value: 512}, + "compatible_node_types": util.StringListAsType(nil), + "docker_image": types.String{Value: "docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0"}, + }, + }}, + }, + Apm: types.List{ + ElemType: types.ObjectType{ + AttrTypes: resourceKindConfigAttrTypes(util.ApmResourceKind), + }, + Elems: []attr.Value{types.Object{ + AttrTypes: resourceKindConfigAttrTypes(util.ApmResourceKind), + Attrs: map[string]attr.Value{ + "denylist": util.StringListAsType([]string{"some"}), + "capacity_constraints_max": types.Int64{Value: 8192}, + "capacity_constraints_min": types.Int64{Value: 512}, + "compatible_node_types": util.StringListAsType(nil), + "docker_image": types.String{Value: "docker.elastic.co/cloud-assets/apm:7.9.1-0"}, + }, + }}, + }, } } diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm.go b/ec/ecdatasource/stackdatasource/flatteners_apm.go index e85395309..e1ad0be79 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm.go @@ -18,40 +18,54 @@ package stackdatasource import ( - "github.com/elastic/cloud-sdk-go/pkg/models" + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + 
"github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) -// flattenApmResources takes in Apm resource models and returns its -// flattened form. -func flattenApmResources(res *models.StackVersionApmConfig) []interface{} { - var m = make(map[string]interface{}) +// flattenApmConfig takes a StackVersionApmConfigs and flattens it. +func flattenApmConfig(ctx context.Context, res *models.StackVersionApmConfig) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + model := newResourceKindConfigModelV0() + + target := types.List{ElemType: resourceKindConfigSchema(util.ApmResourceKind).FrameworkType().(types.ListType).ElemType} + target.Null = true if res == nil { - return nil + return target, nil } if len(res.Blacklist) > 0 { - m["denylist"] = util.StringToItems(res.Blacklist...) + diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) + target.Null = false } if res.CapacityConstraints != nil { - m["capacity_constraints_max"] = int(*res.CapacityConstraints.Max) - m["capacity_constraints_min"] = int(*res.CapacityConstraints.Min) + model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} + model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} + target.Null = false } if len(res.CompatibleNodeTypes) > 0 { - m["compatible_node_types"] = res.CompatibleNodeTypes + diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) 
+ target.Null = false } if res.DockerImage != nil && *res.DockerImage != "" { - m["docker_image"] = *res.DockerImage + model.DockerImage = types.String{Value: *res.DockerImage} + target.Null = false } - if len(m) == 0 { - return nil + if target.Null { + return target, diags } - return []interface{}{m} + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(util.ApmResourceKind).FrameworkType(), &target)...) + + return target, diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go index bc82b7d28..ffb2a3d0c 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_apm_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_apm_test.go @@ -18,11 +18,16 @@ package stackdatasource import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenApmResource(t *testing.T) { @@ -32,15 +37,15 @@ func Test_flattenApmResource(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []resourceKindConfigModelV0 }{ { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #1", args: args{}, want: nil, }, { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #2", args: args{res: &models.StackVersionApmConfig{}}, want: nil, }, @@ -54,18 +59,25 @@ func Test_flattenApmResource(t *testing.T) { }, DockerImage: ec.String("docker.elastic.co/cloud-assets/apm:7.9.1-0"), }}, - want: []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": 
"docker.elastic.co/cloud-assets/apm:7.9.1-0", + want: []resourceKindConfigModelV0{{ + DenyList: util.StringListAsType([]string{"some"}), + CapacityConstraintsMax: types.Int64{Value: 8192}, + CapacityConstraintsMin: types.Int64{Value: 512}, + CompatibleNodeTypes: util.StringListAsType(nil), + DockerImage: types.String{Value: "docker.elastic.co/cloud-assets/apm:7.9.1-0"}, }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenApmResources(tt.args.res) + apm, diags := flattenApmConfig(context.Background(), tt.args.res) + assert.Empty(t, diags) + + var got []resourceKindConfigModelV0 + apm.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + + util.CheckConverionToAttrValue(t, &DataSource{}, "apm", apm) }) } } diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go index 1210111e7..820447afd 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch.go @@ -18,48 +18,63 @@ package stackdatasource import ( - "github.com/elastic/cloud-sdk-go/pkg/models" + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/elastic/cloud-sdk-go/pkg/models" ) -// flattenElasticsearchResources takes in Elasticsearch resource models and returns its -// flattened form. -func flattenElasticsearchResources(res *models.StackVersionElasticsearchConfig) []interface{} { - var m = make(map[string]interface{}) +// flattenElasticsearchConfig takes a StackVersionElasticsearchConfig and flattens it. 
+func flattenElasticsearchConfig(ctx context.Context, res *models.StackVersionElasticsearchConfig) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + model := newElasticsearchConfigModelV0() + + target := types.List{ElemType: elasticsearchConfigSchema().FrameworkType().(types.ListType).ElemType} + target.Null = true if res == nil { - return nil + return target, diags } if len(res.Blacklist) > 0 { - m["denylist"] = util.StringToItems(res.Blacklist...) + diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) + target.Null = false } if res.CapacityConstraints != nil { - m["capacity_constraints_max"] = int(*res.CapacityConstraints.Max) - m["capacity_constraints_min"] = int(*res.CapacityConstraints.Min) + model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} + model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} + target.Null = false } if len(res.CompatibleNodeTypes) > 0 { - m["compatible_node_types"] = util.StringToItems(res.CompatibleNodeTypes...) + diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) + target.Null = false } if res.DockerImage != nil && *res.DockerImage != "" { - m["docker_image"] = *res.DockerImage + model.DockerImage = types.String{Value: *res.DockerImage} + target.Null = false } if len(res.Plugins) > 0 { - m["plugins"] = util.StringToItems(res.Plugins...) + diags.Append(tfsdk.ValueFrom(ctx, res.Plugins, types.ListType{ElemType: types.StringType}, &model.Plugins)...) + target.Null = false } if len(res.DefaultPlugins) > 0 { - m["default_plugins"] = util.StringToItems(res.DefaultPlugins...) + diags.Append(tfsdk.ValueFrom(ctx, res.DefaultPlugins, types.ListType{ElemType: types.StringType}, &model.DefaultPlugins)...) 
+ target.Null = false } - if len(m) == 0 { - return nil + if target.Null { + return target, diags } - return []interface{}{m} + diags.Append(tfsdk.ValueFrom(ctx, []elasticsearchConfigModelV0{model}, elasticsearchConfigSchema().FrameworkType(), &target)...) + + return target, diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go index e6448f486..dcb2f29d8 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_elasticsearch_test.go @@ -18,29 +18,34 @@ package stackdatasource import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) -func Test_flattenElasticsearchResources(t *testing.T) { +func Test_flattenElasticsearchResource(t *testing.T) { type args struct { res *models.StackVersionElasticsearchConfig } tests := []struct { name string args args - want []interface{} + want []elasticsearchConfigModelV0 }{ { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #1", args: args{}, want: nil, }, { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #2", args: args{res: &models.StackVersionElasticsearchConfig{}}, want: nil, }, @@ -70,13 +75,13 @@ func Test_flattenElasticsearchResources(t *testing.T) { "repository-gcs", }, }}, - want: []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "default_plugins": []interface{}{"repository-s3"}, - "docker_image": "docker.elastic.co/cloud-assets/elasticsearch:7.9.1-0", - "plugins": []interface{}{ + want: 
[]elasticsearchConfigModelV0{{ + DenyList: util.StringListAsType([]string{"some"}), + CapacityConstraintsMax: types.Int64{Value: 8192}, + CapacityConstraintsMin: types.Int64{Value: 512}, + CompatibleNodeTypes: util.StringListAsType(nil), + DockerImage: types.String{Value: "docker.elastic.co/cloud-assets/elasticsearch:7.9.1-0"}, + Plugins: util.StringListAsType([]string{ "analysis-icu", "analysis-kuromoji", "analysis-nori", @@ -90,14 +95,21 @@ func Test_flattenElasticsearchResources(t *testing.T) { "mapper-size", "repository-azure", "repository-gcs", - }, + }), + DefaultPlugins: util.StringListAsType([]string{"repository-s3"}), }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenElasticsearchResources(tt.args.res) + elasticsearch, diags := flattenElasticsearchConfig(context.Background(), tt.args.res) + assert.Empty(t, diags) + + var got []elasticsearchConfigModelV0 + elasticsearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + + util.CheckConverionToAttrValue(t, &DataSource{}, "elasticsearch", elasticsearch) }) } } diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go index d20ca04f5..038400661 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search.go @@ -18,40 +18,54 @@ package stackdatasource import ( - "github.com/elastic/cloud-sdk-go/pkg/models" + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) -// flattenEnterpriseSearchResources takes in EnterpriseSearch resource models and returns its -// flattened form. 
-func flattenEnterpriseSearchResources(res *models.StackVersionEnterpriseSearchConfig) []interface{} { - var m = make(map[string]interface{}) +// flattenEnterpriseSearchConfig takes a StackVersionEnterpriseSearchConfig and flattens it. +func flattenEnterpriseSearchConfig(ctx context.Context, res *models.StackVersionEnterpriseSearchConfig) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + model := newResourceKindConfigModelV0() + + target := types.List{ElemType: resourceKindConfigSchema(util.EnterpriseSearchResourceKind).FrameworkType().(types.ListType).ElemType} + target.Null = true if res == nil { - return nil + return target, diags } if len(res.Blacklist) > 0 { - m["denylist"] = util.StringToItems(res.Blacklist...) + diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) + target.Null = false } if res.CapacityConstraints != nil { - m["capacity_constraints_max"] = int(*res.CapacityConstraints.Max) - m["capacity_constraints_min"] = int(*res.CapacityConstraints.Min) + model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} + model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} + target.Null = false } if len(res.CompatibleNodeTypes) > 0 { - m["compatible_node_types"] = res.CompatibleNodeTypes + diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) + target.Null = false } if res.DockerImage != nil && *res.DockerImage != "" { - m["docker_image"] = *res.DockerImage + model.DockerImage = types.String{Value: *res.DockerImage} + target.Null = false } - if len(m) == 0 { - return nil + if target.Null { + return target, diags } - return []interface{}{m} + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(util.EnterpriseSearchResourceKind).FrameworkType(), &target)...) 
+ + return target, diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go index 70195b5d3..ced9bdeff 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_enterprise_search_test.go @@ -18,11 +18,16 @@ package stackdatasource import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenEnterpriseSearchResources(t *testing.T) { @@ -32,15 +37,15 @@ func Test_flattenEnterpriseSearchResources(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []resourceKindConfigModelV0 }{ { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #1", args: args{}, want: nil, }, { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #2", args: args{res: &models.StackVersionEnterpriseSearchConfig{}}, want: nil, }, @@ -54,18 +59,25 @@ func Test_flattenEnterpriseSearchResources(t *testing.T) { }, DockerImage: ec.String("docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0"), }}, - want: []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": "docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0", + want: []resourceKindConfigModelV0{{ + DenyList: util.StringListAsType([]string{"some"}), + CapacityConstraintsMax: types.Int64{Value: 8192}, + CapacityConstraintsMin: types.Int64{Value: 512}, + CompatibleNodeTypes: util.StringListAsType(nil), + DockerImage: types.String{Value: 
"docker.elastic.co/cloud-assets/enterprise_search:7.9.1-0"}, }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenEnterpriseSearchResources(tt.args.res) + enterpriseSearch, diags := flattenEnterpriseSearchConfig(context.Background(), tt.args.res) + assert.Empty(t, diags) + + var got []resourceKindConfigModelV0 + enterpriseSearch.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + + util.CheckConverionToAttrValue(t, &DataSource{}, "enterprise_search", enterpriseSearch) }) } } diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana.go b/ec/ecdatasource/stackdatasource/flatteners_kibana.go index 5401df150..6d9b2a98c 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana.go @@ -18,40 +18,54 @@ package stackdatasource import ( - "github.com/elastic/cloud-sdk-go/pkg/models" + "context" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/terraform-provider-ec/ec/internal/util" ) -// flattenKibanaResources takes in Kibana resource models and returns its -// flattened form. -func flattenKibanaResources(res *models.StackVersionKibanaConfig) []interface{} { - var m = make(map[string]interface{}) +// flattenKibanaConfig takes a StackVersionKibanaConfig and flattens it. +func flattenKibanaConfig(ctx context.Context, res *models.StackVersionKibanaConfig) (types.List, diag.Diagnostics) { + var diags diag.Diagnostics + model := newResourceKindConfigModelV0() + + target := types.List{ElemType: resourceKindConfigSchema(util.KibanaResourceKind).FrameworkType().(types.ListType).ElemType} + target.Null = true if res == nil { - return nil + return target, diags } if len(res.Blacklist) > 0 { - m["denylist"] = util.StringToItems(res.Blacklist...) 
+ diags.Append(tfsdk.ValueFrom(ctx, res.Blacklist, types.ListType{ElemType: types.StringType}, &model.DenyList)...) + target.Null = false } if res.CapacityConstraints != nil { - m["capacity_constraints_max"] = int(*res.CapacityConstraints.Max) - m["capacity_constraints_min"] = int(*res.CapacityConstraints.Min) + model.CapacityConstraintsMax = types.Int64{Value: int64(*res.CapacityConstraints.Max)} + model.CapacityConstraintsMin = types.Int64{Value: int64(*res.CapacityConstraints.Min)} + target.Null = false } if len(res.CompatibleNodeTypes) > 0 { - m["compatible_node_types"] = util.StringToItems(res.CompatibleNodeTypes...) + diags.Append(tfsdk.ValueFrom(ctx, res.CompatibleNodeTypes, types.ListType{ElemType: types.StringType}, &model.CompatibleNodeTypes)...) + target.Null = false } if res.DockerImage != nil && *res.DockerImage != "" { - m["docker_image"] = *res.DockerImage + model.DockerImage = types.String{Value: *res.DockerImage} + target.Null = false } - if len(m) == 0 { - return nil + if target.Null { + return target, diags } - return []interface{}{m} + diags.Append(tfsdk.ValueFrom(ctx, []resourceKindConfigModelV0{model}, resourceKindConfigSchema(util.KibanaResourceKind).FrameworkType(), &target)...) 
+ + return target, diags } diff --git a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go index 74914db5e..266a28be5 100644 --- a/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go +++ b/ec/ecdatasource/stackdatasource/flatteners_kibana_test.go @@ -18,11 +18,16 @@ package stackdatasource import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_flattenKibanaResources(t *testing.T) { @@ -32,15 +37,15 @@ func Test_flattenKibanaResources(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []resourceKindConfigModelV0 }{ { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #1", args: args{}, want: nil, }, { - name: "empty resource list returns empty list", + name: "empty resource list returns empty list #2", args: args{res: &models.StackVersionKibanaConfig{}}, want: nil, }, @@ -54,18 +59,25 @@ func Test_flattenKibanaResources(t *testing.T) { }, DockerImage: ec.String("docker.elastic.co/cloud-assets/kibana:7.9.1-0"), }}, - want: []interface{}{map[string]interface{}{ - "denylist": []interface{}{"some"}, - "capacity_constraints_max": 8192, - "capacity_constraints_min": 512, - "docker_image": "docker.elastic.co/cloud-assets/kibana:7.9.1-0", + want: []resourceKindConfigModelV0{{ + DenyList: util.StringListAsType([]string{"some"}), + CapacityConstraintsMax: types.Int64{Value: 8192}, + CapacityConstraintsMin: types.Int64{Value: 512}, + CompatibleNodeTypes: util.StringListAsType(nil), + DockerImage: types.String{Value: "docker.elastic.co/cloud-assets/kibana:7.9.1-0"}, }}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := 
flattenKibanaResources(tt.args.res) + kibana, diags := flattenKibanaConfig(context.Background(), tt.args.res) + assert.Empty(t, diags) + + var got []resourceKindConfigModelV0 + kibana.ElementsAs(context.Background(), &got, false) assert.Equal(t, tt.want, got) + + util.CheckConverionToAttrValue(t, &DataSource{}, "kibana", kibana) }) } } diff --git a/ec/ecdatasource/stackdatasource/schema.go b/ec/ecdatasource/stackdatasource/schema.go index 79621176c..042b9851d 100644 --- a/ec/ecdatasource/stackdatasource/schema.go +++ b/ec/ecdatasource/stackdatasource/schema.go @@ -18,103 +18,191 @@ package stackdatasource import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "context" + "fmt" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" ) -func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "version_regex": { - Type: schema.TypeString, - Required: true, - }, - "region": { - Type: schema.TypeString, - Required: true, - }, - "lock": { - Type: schema.TypeBool, - Optional: true, - }, +func (d *DataSource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return tfsdk.Schema{ + Attributes: map[string]tfsdk.Attribute{ + "version_regex": { + Type: types.StringType, + Required: true, + }, + "region": { + Type: types.StringType, + Required: true, + }, + "lock": { + Type: types.BoolType, + Optional: true, + }, - // Exported attributes - "version": { - Type: schema.TypeString, - Computed: true, - }, - "accessible": { - Type: schema.TypeBool, - Computed: true, - }, - "min_upgradable_from": { - Type: schema.TypeString, - Computed: true, - }, - "upgradable_to": { - Type: schema.TypeList, - Computed: true, - 
Elem: &schema.Schema{Type: schema.TypeString}, - }, - "allowlisted": { - Type: schema.TypeBool, - Computed: true, + // Computed attributes + "id": { + Type: types.StringType, + Computed: true, + MarkdownDescription: "Unique identifier of this data source.", + }, + "version": { + Type: types.StringType, + Computed: true, + }, + "accessible": { + Type: types.BoolType, + Computed: true, + }, + "min_upgradable_from": { + Type: types.StringType, + Computed: true, + }, + "upgradable_to": { + Type: types.ListType{ElemType: types.StringType}, + Computed: true, + }, + "allowlisted": { + Type: types.BoolType, + Computed: true, + }, + "apm": resourceKindConfigSchema(util.ApmResourceKind), + "enterprise_search": resourceKindConfigSchema(util.EnterpriseSearchResourceKind), + "elasticsearch": elasticsearchConfigSchema(), + "kibana": resourceKindConfigSchema(util.KibanaResourceKind), }, - - "apm": newKindResourceSchema(), - "enterprise_search": newKindResourceSchema(), - "elasticsearch": newKindResourceSchema(), - "kibana": newKindResourceSchema(), - } + }, nil } -func newKindResourceSchema() *schema.Schema { - return &schema.Schema{ - Computed: true, - Type: schema.TypeList, - Elem: &schema.Resource{Schema: map[string]*schema.Schema{ +func elasticsearchConfigSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Information for Elasticsearch workloads on this stack version.", + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ "denylist": { - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, + Type: types.ListType{ElemType: types.StringType}, + Description: "List of configuration options that cannot be overridden by user settings.", + Computed: true, }, "capacity_constraints_max": { - Type: schema.TypeInt, - Computed: true, + Type: types.Int64Type, + Description: "Maximum size of the instances.", + Computed: true, 
}, "capacity_constraints_min": { - Type: schema.TypeInt, - Computed: true, + Type: types.Int64Type, + Description: "Minimum size of the instances.", + Computed: true, }, "compatible_node_types": { - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, + Type: types.ListType{ElemType: types.StringType}, + Description: "List of node types compatible with this one.", + Computed: true, }, "docker_image": { - Type: schema.TypeString, - Computed: true, + Type: types.StringType, + Description: "Docker image to use for the Elasticsearch cluster instances.", + Computed: true, }, "plugins": { - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, + Type: types.ListType{ElemType: types.StringType}, + Description: "List of available plugins to be specified by users in Elasticsearch cluster instances.", + Computed: true, }, "default_plugins": { - Computed: true, - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, + Type: types.ListType{ElemType: types.StringType}, + Description: "List of default plugins.", + Computed: true, }, + // node_types not added. It is highly unlikely they will be used + // for anything, and if they're needed in the future, then we can + // invest on adding them. 
+ }), + } +} + +func elasticsearchConfigAttrTypes() map[string]attr.Type { + return elasticsearchConfigSchema().Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} +func resourceKindConfigSchema(resourceKind util.ResourceKind) tfsdk.Attribute { + return tfsdk.Attribute{ + Description: fmt.Sprintf("Information for %s workloads on this stack version.", resourceKind.Name()), + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "denylist": { + Type: types.ListType{ElemType: types.StringType}, + Description: "List of configuration options that cannot be overridden by user settings.", + Computed: true, + }, + "capacity_constraints_max": { + Type: types.Int64Type, + Description: "Maximum size of the instances.", + Computed: true, + }, + "capacity_constraints_min": { + Type: types.Int64Type, + Description: "Minimum size of the instances.", + Computed: true, + }, + "compatible_node_types": { + Type: types.ListType{ElemType: types.StringType}, + Description: "List of node types compatible with this one.", + Computed: true, + }, + "docker_image": { + Type: types.StringType, + Description: fmt.Sprintf("Docker image to use for the %s instance.", resourceKind.Name()), + Computed: true, + }, // node_types not added. It is highly unlikely they will be used // for anything, and if they're needed in the future, then we can // invest on adding them. 
- }}, + }), } } + +func resourceKindConfigAttrTypes(resourceKind util.ResourceKind) map[string]attr.Type { + return resourceKindConfigSchema(resourceKind).Attributes.Type().(types.ListType).ElemType.(types.ObjectType).AttrTypes +} + +type modelV0 struct { + ID types.String `tfsdk:"id"` + VersionRegex types.String `tfsdk:"version_regex"` + Region types.String `tfsdk:"region"` + Lock types.Bool `tfsdk:"lock"` + Version types.String `tfsdk:"version"` + Accessible types.Bool `tfsdk:"accessible"` + MinUpgradableFrom types.String `tfsdk:"min_upgradable_from"` + UpgradableTo types.List `tfsdk:"upgradable_to"` + AllowListed types.Bool `tfsdk:"allowlisted"` + Apm types.List `tfsdk:"apm"` //< resourceKindConfigModelV0 + EnterpriseSearch types.List `tfsdk:"enterprise_search"` //< resourceKindConfigModelV0 + Elasticsearch types.List `tfsdk:"elasticsearch"` //< elasticsearchConfigModelV0 + Kibana types.List `tfsdk:"kibana"` //< resourceKindConfigModelV0 +} + +type elasticsearchConfigModelV0 struct { + DenyList types.List `tfsdk:"denylist"` + CapacityConstraintsMax types.Int64 `tfsdk:"capacity_constraints_max"` + CapacityConstraintsMin types.Int64 `tfsdk:"capacity_constraints_min"` + CompatibleNodeTypes types.List `tfsdk:"compatible_node_types"` + DockerImage types.String `tfsdk:"docker_image"` + Plugins types.List `tfsdk:"plugins"` + DefaultPlugins types.List `tfsdk:"default_plugins"` +} + +type resourceKindConfigModelV0 struct { + DenyList types.List `tfsdk:"denylist"` + CapacityConstraintsMax types.Int64 `tfsdk:"capacity_constraints_max"` + CapacityConstraintsMin types.Int64 `tfsdk:"capacity_constraints_min"` + CompatibleNodeTypes types.List `tfsdk:"compatible_node_types"` + DockerImage types.String `tfsdk:"docker_image"` +} diff --git a/ec/ecresource/deploymentresource/apm/v1/apm.go b/ec/ecresource/deploymentresource/apm/v1/apm.go new file mode 100644 index 000000000..687aeb530 --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v1/apm.go @@ -0,0 +1,47 @@ +// Licensed 
to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ApmTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.List `tfsdk:"topology"` + Config types.List `tfsdk:"config"` +} + +type Apm struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology v1.Topologies `tfsdk:"topology"` + Config ApmConfigs `tfsdk:"config"` +} + +type Apms []Apm diff --git a/ec/ecresource/deploymentresource/apm/v1/apm_config.go b/ec/ecresource/deploymentresource/apm/v1/apm_config.go new file mode 100644 index 000000000..5f82dbf0c --- /dev/null +++ 
b/ec/ecresource/deploymentresource/apm/v1/apm_config.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ApmConfigTF struct { + DockerImage types.String `tfsdk:"docker_image"` + DebugEnabled types.Bool `tfsdk:"debug_enabled"` + UserSettingsJson types.String `tfsdk:"user_settings_json"` + UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"` + UserSettingsYaml types.String `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"` +} + +type ApmConfig struct { + DockerImage *string `tfsdk:"docker_image"` + DebugEnabled *bool `tfsdk:"debug_enabled"` + UserSettingsJson *string `tfsdk:"user_settings_json"` + UserSettingsOverrideJson *string `tfsdk:"user_settings_override_json"` + UserSettingsYaml *string `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml *string `tfsdk:"user_settings_override_yaml"` +} + +type ApmConfigs []ApmConfig diff --git a/ec/ecresource/deploymentresource/apm/v1/schema.go b/ec/ecresource/deploymentresource/apm/v1/schema.go new file mode 100644 index 000000000..07aaeb41c --- /dev/null +++ 
b/ec/ecresource/deploymentresource/apm/v1/schema.go @@ -0,0 +1,159 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func ApmTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + 
PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + }, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + }), + } +} + +func ApmConfigSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: `Optionally define the Apm configuration options for the APM Server`, + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the APM nodes will use. This option will not work in ESS customers and should only be changed if you know what you're doing.", + Optional: true, + }, + "debug_enabled": { + Type: types.BoolType, + Description: `Optionally enable debug mode for APM servers - defaults to false`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), + }, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + }), + } +} + +func ApmSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional APM resource definition", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + }, + }, + "ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-apm"}), + }, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + }, + "region": { + Type: types.StringType, + Computed: true, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + }, + 
"topology": ApmTopologySchema(), + "config": ApmConfigSchema(), + }), + } +} diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_config.go b/ec/ecresource/deploymentresource/apm/v2/apm_config.go new file mode 100644 index 000000000..1fe8d6c6f --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/apm_config.go @@ -0,0 +1,107 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +type ApmConfig = v1.ApmConfig + +func readApmConfigs(in *models.ApmConfiguration) (v1.ApmConfigs, error) { + var cfg ApmConfig + + if in.UserSettingsYaml != "" { + cfg.UserSettingsYaml = &in.UserSettingsYaml + } + + if in.UserSettingsOverrideYaml != "" { + cfg.UserSettingsOverrideYaml = &in.UserSettingsOverrideYaml + } + + if o := in.UserSettingsJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsJson = ec.String(string(b)) + } + } + + if o := in.UserSettingsOverrideJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsOverrideJson = ec.String(string(b)) + } + } + + if in.DockerImage != "" { + cfg.DockerImage = &in.DockerImage + } + + if in.SystemSettings != nil && in.SystemSettings.DebugEnabled != nil { + cfg.DebugEnabled = in.SystemSettings.DebugEnabled + } + + if cfg == (ApmConfig{}) { + return nil, nil + } + + return v1.ApmConfigs{cfg}, nil +} + +func apmConfigPayload(ctx context.Context, cfg v1.ApmConfigTF, model *models.ApmConfiguration) diag.Diagnostics { + if !cfg.DebugEnabled.IsNull() { + if model.SystemSettings == nil { + model.SystemSettings = &models.ApmSystemSettings{} + } + model.SystemSettings.DebugEnabled = &cfg.DebugEnabled.Value + } + + var diags diag.Diagnostics + if cfg.UserSettingsJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsJson.Value), &model.UserSettingsJSON); err != nil { + diags.AddError("failed expanding apm user_settings_json", err.Error()) + return diags + } + } + + if cfg.UserSettingsOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsOverrideJson.Value), 
&model.UserSettingsOverrideJSON); err != nil { + diags.AddError("failed expanding apm user_settings_override_json", err.Error()) + return diags + } + } + + if !cfg.UserSettingsYaml.IsNull() { + model.UserSettingsYaml = cfg.UserSettingsYaml.Value + } + + if !cfg.UserSettingsOverrideYaml.IsNull() { + model.UserSettingsOverrideYaml = cfg.UserSettingsOverrideYaml.Value + } + + if !cfg.DockerImage.IsNull() { + model.DockerImage = cfg.DockerImage.Value + } + + return nil +} diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_payload.go b/ec/ecresource/deploymentresource/apm/v2/apm_payload.go new file mode 100644 index 000000000..04e5a7ac7 --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/apm_payload.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ApmTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + Config types.Object `tfsdk:"config"` +} + +func (apm ApmTF) payload(ctx context.Context, payload models.ApmPayload) (*models.ApmPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + if !apm.ElasticsearchClusterRefId.IsNull() { + payload.ElasticsearchClusterRefID = &apm.ElasticsearchClusterRefId.Value + } + + if !apm.RefId.IsNull() { + payload.RefID = &apm.RefId.Value + } + + if apm.Region.Value != "" { + payload.Region = &apm.Region.Value + } + + if !apm.Config.IsNull() && !apm.Config.IsUnknown() { + var cfg v1.ApmConfigTF + + ds := tfsdk.ValueAs(ctx, apm.Config, &cfg) + + diags.Append(ds...) + + if !ds.HasError() { + diags.Append(apmConfigPayload(ctx, cfg, payload.Plan.Apm)...) 
+ } + } + + topology := topologyv1.TopologyTF{ + InstanceConfigurationId: apm.InstanceConfigurationId, + Size: apm.Size, + SizeResource: apm.SizeResource, + ZoneCount: apm.ZoneCount, + } + + topologyPayload, ds := apmTopologyPayload(ctx, topology, defaultApmTopology(payload.Plan.ClusterTopology), 0) + + diags.Append(ds...) + + if !ds.HasError() && topologyPayload != nil { + payload.Plan.ClusterTopology = []*models.ApmTopologyElement{topologyPayload} + } + + return &payload, diags +} + +func ApmPayload(ctx context.Context, apmObj types.Object, template *models.DeploymentTemplateInfoV2) (*models.ApmPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + var apm *ApmTF + + if diags = tfsdk.ValueAs(ctx, apmObj, &apm); diags.HasError() { + return nil, diags + } + + if apm == nil { + return nil, nil + } + + templatePayload := payloadFromTemplate(template) + + if templatePayload == nil { + diags.AddError("apm payload error", "apm specified but deployment template is not configured for it. Use a different template if you wish to add apm") + return nil, diags + } + + payload, diags := apm.payload(ctx, *templatePayload) + + if diags.HasError() { + return nil, diags + } + + return payload, nil +} + +// payloadFromTemplate returns the ApmPayload from a deployment +// template or an empty version of the payload. +func payloadFromTemplate(template *models.DeploymentTemplateInfoV2) *models.ApmPayload { + if template == nil || len(template.DeploymentTemplate.Resources.Apm) == 0 { + return nil + } + return template.DeploymentTemplate.Resources.Apm[0] +} diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_payload_test.go b/ec/ecresource/deploymentresource/apm/v2/apm_payload_test.go new file mode 100644 index 000000000..b264918dd --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/apm_payload_test.go @@ -0,0 +1,260 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" +) + +func Test_ApmPayload(t *testing.T) { + tplPath := "../../testdata/template-aws-io-optimized-v2.json" + tpl := func() *models.DeploymentTemplateInfoV2 { + return testutil.ParseDeploymentTemplate(t, tplPath) + } + type args struct { + apm *Apm + tpl *models.DeploymentTemplateInfoV2 + } + tests := []struct { + name string + args args + want *models.ApmPayload + diags diag.Diagnostics + }{ + { + name: "returns nil when there's no resources", + }, + { + name: "parses an APM resource with explicit topology", + args: args{ + tpl: tpl(), + apm: &Apm{ + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: 
ec.String("somerefid"), + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + want: &models.ApmPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }}, + }, + }, + }, + { + name: "parses an APM resource with invalid instance_configuration_id", + args: args{ + tpl: tpl(), + apm: &Apm{ + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("so invalid"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError( + "cannot match topology element", + `apm topology: invalid instance_configuration_id: "so invalid" doesn't match any of the deployment template instance configurations`, + ) + return diags + }(), + }, + { + name: "parses an APM resource with no topology", + args: args{ + tpl: tpl(), + apm: &Apm{ + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + }, + }, + want: &models.ApmPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + { + name: 
"parses an APM resource with a topology element but no instance_configuration_id", + args: args{ + tpl: tpl(), + apm: &Apm{ + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + }, + }, + want: &models.ApmPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }}, + }, + }, + }, + { + name: "parses an APM resource with explicit topology and some config", + args: args{ + tpl: tpl(), + apm: &Apm{ + RefId: ec.String("tertiary-apm"), + ElasticsearchClusterRefId: ec.String("somerefid"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + Config: &v1.ApmConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\": \"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\": \"value2\"}"), + DebugEnabled: ec.Bool(true), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + want: &models.ApmPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("tertiary-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + 
SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(true), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + }, + }, + }, + }, + }, + { + name: "tries to parse an apm resource when the template doesn't have an APM instance set.", + args: args{ + tpl: nil, + apm: &Apm{ + RefId: ec.String("tertiary-apm"), + ElasticsearchClusterRefId: ec.String("somerefid"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Config: &v1.ApmConfig{ + DebugEnabled: ec.Bool(true), + }, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("apm payload error", "apm specified but deployment template is not configured for it. Use a different template if you wish to add apm") + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var apm types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.apm, ApmSchema().FrameworkType(), &apm) + assert.Nil(t, diags) + + if got, diags := ApmPayload(context.Background(), apm, tt.args.tpl); tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.Equal(t, tt.want, got) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_read.go b/ec/ecresource/deploymentresource/apm/v2/apm_read.go new file mode 100644 index 000000000..23977e43f --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/apm_read.go @@ -0,0 +1,97 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +type Apm struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + Config *ApmConfig `tfsdk:"config"` +} + +func ReadApms(in []*models.ApmResourceInfo) (*Apm, error) { + for _, model := range in { + if util.IsCurrentApmPlanEmpty(model) || IsApmStopped(model) { + continue + } + + apm, err := ReadApm(model) + if err != nil { + return nil, err + } + + return apm, nil + } + + return nil, nil +} + +func ReadApm(in *models.ApmResourceInfo) (*Apm, error) { + var apm Apm + + apm.RefId = in.RefID + apm.ResourceId = in.Info.ID + apm.Region = in.Region + plan := in.Info.PlanInfo.Current.Plan + + topologies, err := readApmTopologies(plan.ClusterTopology) + if err != nil { + return nil, err + } + + if len(topologies) > 0 { + apm.InstanceConfigurationId = topologies[0].InstanceConfigurationId 
+ apm.Size = topologies[0].Size + apm.SizeResource = topologies[0].SizeResource + apm.ZoneCount = topologies[0].ZoneCount + } + + apm.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + apm.HttpEndpoint, apm.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + configs, err := readApmConfigs(plan.Apm) + if err != nil { + return nil, err + } + + if len(configs) > 0 { + apm.Config = &configs[0] + } + + return &apm, nil +} + +// IsApmStopped returns true if the resource is stopped. +func IsApmStopped(res *models.ApmResourceInfo) bool { + return res == nil || res.Info == nil || res.Info.Status == nil || + *res.Info.Status == "stopped" +} diff --git a/ec/ecresource/deploymentresource/apm_flatteners_test.go b/ec/ecresource/deploymentresource/apm/v2/apm_read_test.go similarity index 65% rename from ec/ecresource/deploymentresource/apm_flatteners_test.go rename to ec/ecresource/deploymentresource/apm/v2/apm_read_test.go index 68a07428a..0438c506d 100644 --- a/ec/ecresource/deploymentresource/apm_flatteners_test.go +++ b/ec/ecresource/deploymentresource/apm/v2/apm_read_test.go @@ -15,31 +15,39 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" ) -func Test_flattenApmResource(t *testing.T) { +func Test_readApm(t *testing.T) { type args struct { - in []*models.ApmResourceInfo - name string + in []*models.ApmResourceInfo } + tests := []struct { - name string - args args - want []interface{} + name string + args args + want *Apm + diags diag.Diagnostics }{ { - name: "empty resource list returns empty list", - args: args{in: []*models.ApmResourceInfo{}}, - want: []interface{}{}, + name: "empty resource list returns empty list", + args: args{in: []*models.ApmResourceInfo{}}, + want: nil, + diags: nil, }, { name: "empty current plan returns empty list", @@ -52,7 +60,8 @@ func Test_flattenApmResource(t *testing.T) { }, }, }}, - want: []interface{}{}, + want: nil, + diags: nil, }, { name: "parses the apm resource", @@ -93,23 +102,17 @@ func Test_flattenApmResource(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://apmresource.cloud.elastic.co:9200", - "https_endpoint": "https://apmresource.cloud.elastic.co:9243", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.apm.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }, - }, - }, + want: &Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: 
ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://apmresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://apmresource.cloud.elastic.co:9243"), + InstanceConfigurationId: ec.String("aws.apm.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, }, }, { @@ -204,26 +207,24 @@ func Test_flattenApmResource(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://apmresource.cloud.elastic.co:9200", - "https_endpoint": "https://apmresource.cloud.elastic.co:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }}, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - }}, + want: &Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://apmresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://apmresource.cloud.elastic.co:9243"), + InstanceConfigurationId: ec.String("aws.apm.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Config: &v1.ApmConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + }, }, { name: "parses the 
apm resource with config overrides and system settings", @@ -275,33 +276,67 @@ func Test_flattenApmResource(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://apmresource.cloud.elastic.co:9200", - "https_endpoint": "https://apmresource.cloud.elastic.co:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }}, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", + want: &Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://apmresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://apmresource.cloud.elastic.co:9243"), + InstanceConfigurationId: ec.String("aws.apm.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Config: &v1.ApmConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + DebugEnabled: ec.Bool(true), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + apms, err := ReadApms(tt.args.in) + assert.Nil(t, err) + assert.Equal(t, tt.want, apms) - "debug_enabled": true, - }}, - }}, + var apmTF types.Object + diags := tfsdk.ValueFrom(context.Background(), apms, ApmSchema().FrameworkType(), &apmTF) + 
assert.Nil(t, diags) + }) + } +} + +func Test_IsApmResourceStopped(t *testing.T) { + type args struct { + res *models.ApmResourceInfo + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "started resource returns false", + args: args{res: &models.ApmResourceInfo{Info: &models.ApmInfo{ + Status: ec.String("started"), + }}}, + want: false, + }, + { + name: "stopped resource returns true", + args: args{res: &models.ApmResourceInfo{Info: &models.ApmInfo{ + Status: ec.String("stopped"), + }}}, + want: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenApmResources(tt.args.in, tt.args.name) + got := IsApmStopped(tt.args.res) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/deploymentresource/apm/v2/apm_topology.go b/ec/ecresource/deploymentresource/apm/v2/apm_topology.go new file mode 100644 index 000000000..5543ef20f --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/apm_topology.go @@ -0,0 +1,135 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +const ( + minimumApmSize = 512 +) + +func readApmTopology(in *models.ApmTopologyElement) (*v1.Topology, error) { + var top v1.Topology + + if in.InstanceConfigurationID != "" { + top.InstanceConfigurationId = &in.InstanceConfigurationID + } + + if in.Size != nil { + top.Size = ec.String(util.MemoryToState(*in.Size.Value)) + top.SizeResource = ec.String(*in.Size.Resource) + } + + top.ZoneCount = int(in.ZoneCount) + + return &top, nil +} + +func readApmTopologies(in []*models.ApmTopologyElement) (v1.Topologies, error) { + topologies := make([]v1.Topology, 0, len(in)) + + for _, model := range in { + if model.Size == nil || model.Size.Value == nil || *model.Size.Value == 0 { + continue + } + + topology, err := readApmTopology(model) + if err != nil { + return nil, nil + } + + topologies = append(topologies, *topology) + } + + return topologies, nil +} + +// defaultApmTopology iterates over all the templated topology elements and +// sets the size to the default when the template size is smaller than the +// deployment template default, the same is done on the ZoneCount. 
+func defaultApmTopology(topology []*models.ApmTopologyElement) []*models.ApmTopologyElement { + for _, t := range topology { + if *t.Size.Value < minimumApmSize { + t.Size.Value = ec.Int32(minimumApmSize) + } + if t.ZoneCount < utils.MinimumZoneCount { + t.ZoneCount = utils.MinimumZoneCount + } + } + + return topology +} + +func apmTopologyPayload(ctx context.Context, topology v1.TopologyTF, planModels []*models.ApmTopologyElement, index int) (*models.ApmTopologyElement, diag.Diagnostics) { + + icID := topology.InstanceConfigurationId.Value + + // When a topology element is set but no instance_configuration_id + // is set, then obtain the instance_configuration_id from the topology + // element. + if icID == "" && index < len(planModels) { + icID = planModels[index].InstanceConfigurationID + } + + size, err := converters.ParseTopologySizeTypes(topology.Size, topology.SizeResource) + + var diags diag.Diagnostics + if err != nil { + diags.AddError("size parsing error", err.Error()) + return nil, diags + } + + topologyElem, err := matchApmTopology(icID, planModels) + if err != nil { + diags.AddError("cannot match topology element", err.Error()) + return nil, diags + } + + if size != nil { + topologyElem.Size = size + } + + if topology.ZoneCount.Value > 0 { + topologyElem.ZoneCount = int32(topology.ZoneCount.Value) + } + + return topologyElem, nil +} + +func matchApmTopology(id string, topologies []*models.ApmTopologyElement) (*models.ApmTopologyElement, error) { + for _, t := range topologies { + if t.InstanceConfigurationID == id { + return t, nil + } + } + return nil, fmt.Errorf( + `apm topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, + id, + ) +} diff --git a/ec/ecresource/deploymentresource/apm/v2/schema.go b/ec/ecresource/deploymentresource/apm/v2/schema.go new file mode 100644 index 000000000..d73d2d67e --- /dev/null +++ b/ec/ecresource/deploymentresource/apm/v2/schema.go @@ -0,0 +1,143 @@ 
// ApmConfigSchema returns the schema for the APM resource's nested
// `config` attribute: docker image override, debug flag, and the four
// user-settings fields (JSON/YAML, plain and override variants).
func ApmConfigSchema() tfsdk.Attribute {
	return tfsdk.Attribute{
		Description: `Optionally define the Apm configuration options for the APM Server`,
		Optional:    true,
		Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{
			"docker_image": {
				Type:        types.StringType,
				Description: "Optionally override the docker image the APM nodes will use. This option will not work in ESS customers and should only be changed if you know what you're doing.",
				Optional:    true,
			},
			"debug_enabled": {
				Type:        types.BoolType,
				Description: `Optionally enable debug mode for APM servers - defaults to false`,
				Optional:    true,
				// Computed + DefaultValue: unset in config resolves to
				// false in state instead of remaining unknown.
				Computed: true,
				PlanModifiers: []tfsdk.AttributePlanModifier{
					planmodifier.DefaultValue(types.Bool{Value: false}),
				},
			},
			"user_settings_json": {
				Type:        types.StringType,
				Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`,
				Optional:    true,
			},
			"user_settings_override_json": {
				Type:        types.StringType,
				Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`,
				Optional:    true,
			},
			"user_settings_yaml": {
				Type:        types.StringType,
				Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`,
				Optional:    true,
			},
			"user_settings_override_yaml": {
				Type:        types.StringType,
				Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`,
				Optional:    true,
			},
		}),
	}
}

// ApmSchema returns the schema for the optional `apm` block of the
// ec_deployment resource. Ref ids default to the conventional
// "main-elasticsearch"/"main-apm" values; endpoint and resource/region
// fields are computed from the API; topology fields (instance
// configuration, size, zone count) use UseStateForUnknown so unchanged
// values don't show as (known after apply) in plans.
func ApmSchema() tfsdk.Attribute {
	return tfsdk.Attribute{
		Description: "Optional APM resource definition",
		Optional:    true,
		Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{
			"elasticsearch_cluster_ref_id": {
				Type:     types.StringType,
				Optional: true,
				Computed: true,
				PlanModifiers: []tfsdk.AttributePlanModifier{
					planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}),
				},
			},
			"ref_id": {
				Type:     types.StringType,
				Optional: true,
				Computed: true,
				PlanModifiers: []tfsdk.AttributePlanModifier{
					planmodifier.DefaultValue(types.String{Value: "main-apm"}),
				},
			},
			// API-assigned identifiers and endpoints: read-only.
			"resource_id": {
				Type:     types.StringType,
				Computed: true,
			},
			"region": {
				Type:     types.StringType,
				Computed: true,
			},
			"http_endpoint": {
				Type:     types.StringType,
				Computed: true,
			},
			"https_endpoint": {
				Type:     types.StringType,
				Computed: true,
			},
			"instance_configuration_id": {
				Type:     types.StringType,
				Optional: true,
				Computed: true,
				PlanModifiers: []tfsdk.AttributePlanModifier{
					resource.UseStateForUnknown(),
				},
			},
			"size": {
				Type:     types.StringType,
				Computed: true,
				Optional: true,
				PlanModifiers: []tfsdk.AttributePlanModifier{
					resource.UseStateForUnknown(),
				},
			},
			"size_resource": {
				Type:        types.StringType,
				Description: `Optional size type, defaults to "memory".`,
				Optional:    true,
				Computed:    true,
				PlanModifiers: []tfsdk.AttributePlanModifier{
					planmodifier.DefaultValue(types.String{Value: "memory"}),
				},
			},
			"zone_count": {
				Type:     types.Int64Type,
				Computed: true,
				Optional: true,
				PlanModifiers: []tfsdk.AttributePlanModifier{
					resource.UseStateForUnknown(),
				},
			},
			"config": ApmConfigSchema(),
		}),
	}
}
Use a different template if you wish to add apm") - } - - result := make([]*models.ApmPayload, 0, len(apms)) - for _, raw := range apms { - resResource, err := expandApmResource(raw, tpl) - if err != nil { - return nil, err - } - result = append(result, resResource) - } - - return result, nil -} - -func expandApmResource(raw interface{}, res *models.ApmPayload) (*models.ApmPayload, error) { - var apm = raw.(map[string]interface{}) - - if esRefID, ok := apm["elasticsearch_cluster_ref_id"].(string); ok { - res.ElasticsearchClusterRefID = ec.String(esRefID) - } - - if refID, ok := apm["ref_id"].(string); ok { - res.RefID = ec.String(refID) - } - - if region, ok := apm["region"].(string); ok && region != "" { - res.Region = ec.String(region) - } - - if cfg, ok := apm["config"].([]interface{}); ok { - if err := expandApmConfig(cfg, res.Plan.Apm); err != nil { - return nil, err - } - } - - if rt, ok := apm["topology"].([]interface{}); ok && len(rt) > 0 { - topology, err := expandApmTopology(rt, res.Plan.ClusterTopology) - if err != nil { - return nil, err - } - res.Plan.ClusterTopology = topology - } else { - res.Plan.ClusterTopology = defaultApmTopology(res.Plan.ClusterTopology) - } - - return res, nil -} - -func expandApmTopology(rawTopologies []interface{}, topologies []*models.ApmTopologyElement) ([]*models.ApmTopologyElement, error) { - res := make([]*models.ApmTopologyElement, 0, len(rawTopologies)) - - for i, rawTop := range rawTopologies { - topology, ok := rawTop.(map[string]interface{}) - if !ok { - continue - } - - var icID string - if id, ok := topology["instance_configuration_id"].(string); ok { - icID = id - } - // When a topology element is set but no instance_configuration_id - // is set, then obtain the instance_configuration_id from the topology - // element. 
- if t := defaultApmTopology(topologies); icID == "" && len(t) > i { - icID = t[i].InstanceConfigurationID - } - - size, err := util.ParseTopologySize(topology) - if err != nil { - return nil, err - } - - elem, err := matchApmTopology(icID, topologies) - if err != nil { - return nil, err - } - if size != nil { - elem.Size = size - } - - if zones, ok := topology["zone_count"].(int); ok && zones > 0 { - elem.ZoneCount = int32(zones) - } - - res = append(res, elem) - } - - return res, nil -} - -func expandApmConfig(raw []interface{}, res *models.ApmConfiguration) error { - for _, rawCfg := range raw { - cfg, ok := rawCfg.(map[string]interface{}) - if !ok { - continue - } - - if debugEnabled, ok := cfg["debug_enabled"].(bool); ok { - if res.SystemSettings == nil { - res.SystemSettings = &models.ApmSystemSettings{} - } - res.SystemSettings.DebugEnabled = ec.Bool(debugEnabled) - } - - if settings, ok := cfg["user_settings_json"].(string); ok && settings != "" { - if err := json.Unmarshal([]byte(settings), &res.UserSettingsJSON); err != nil { - return fmt.Errorf("failed expanding apm user_settings_json: %w", err) - } - } - if settings, ok := cfg["user_settings_override_json"].(string); ok && settings != "" { - if err := json.Unmarshal([]byte(settings), &res.UserSettingsOverrideJSON); err != nil { - return fmt.Errorf("failed expanding apm user_settings_override_json: %w", err) - } - } - if settings, ok := cfg["user_settings_yaml"].(string); ok && settings != "" { - res.UserSettingsYaml = settings - } - if settings, ok := cfg["user_settings_override_yaml"].(string); ok && settings != "" { - res.UserSettingsOverrideYaml = settings - } - - if v, ok := cfg["docker_image"].(string); ok { - res.DockerImage = v - } - } - - return nil -} - -// defaultApmTopology iterates over all the templated topology elements and -// sets the size to the default when the template size is smaller than the -// deployment template default, the same is done on the ZoneCount. 
-func defaultApmTopology(topology []*models.ApmTopologyElement) []*models.ApmTopologyElement { - for _, t := range topology { - if *t.Size.Value < minimumApmSize { - t.Size.Value = ec.Int32(minimumApmSize) - } - if t.ZoneCount < minimumZoneCount { - t.ZoneCount = minimumZoneCount - } - } - - return topology -} - -func matchApmTopology(id string, topologies []*models.ApmTopologyElement) (*models.ApmTopologyElement, error) { - for _, t := range topologies { - if t.InstanceConfigurationID == id { - return t, nil - } - } - return nil, fmt.Errorf( - `apm topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, - id, - ) -} - -// apmResource returns the ApmPayload from a deployment -// template or an empty version of the payload. -func apmResource(res *models.DeploymentTemplateInfoV2) *models.ApmPayload { - if len(res.DeploymentTemplate.Resources.Apm) == 0 { - return nil - } - return res.DeploymentTemplate.Resources.Apm[0] -} - -// apmResourceFromUpdate returns the ApmPayload from a deployment -// update request or an empty version of the payload. -func apmResourceFromUpdate(res *models.DeploymentUpdateResources) *models.ApmPayload { - if len(res.Apm) == 0 { - return nil - } - return res.Apm[0] -} diff --git a/ec/ecresource/deploymentresource/apm_expanders_test.go b/ec/ecresource/deploymentresource/apm_expanders_test.go deleted file mode 100644 index ec6a00bde..000000000 --- a/ec/ecresource/deploymentresource/apm_expanders_test.go +++ /dev/null @@ -1,328 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "errors" - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" -) - -func Test_expandApmResources(t *testing.T) { - tplPath := "testdata/template-aws-io-optimized-v2.json" - tpl := func() *models.ApmPayload { - return apmResource(parseDeploymentTemplate(t, - tplPath, - )) - } - type args struct { - ess []interface{} - tpl *models.ApmPayload - } - tests := []struct { - name string - args args - want []*models.ApmPayload - err error - }{ - { - name: "returns nil when there's no resources", - }, - { - name: "parses an APM resource with explicit topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "2g", - "size_resource": "memory", - "zone_count": 1, - }}, - }, - }, - }, - want: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }}, - }, - 
}, - }, - }, - { - name: "parses an APM resource with invalid instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "so invalid", - "size": "2g", - "size_resource": "memory", - "zone_count": 1, - }}, - }, - }, - }, - err: errors.New(`apm topology: invalid instance_configuration_id: "so invalid" doesn't match any of the deployment template instance configurations`), - }, - { - name: "parses an APM resource with no topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - }, - }, - }, - want: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - }, - { - name: "parses an APM resource with a topology element but no instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - "size_resource": "memory", - }}, - }, - }, - }, - want: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: 
&models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }}, - }, - }, - }, - }, - { - name: "parses an APM resource with multiple topologies element but no instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{ - map[string]interface{}{ - "size": "2g", - "size_resource": "memory", - }, map[string]interface{}{ - "size": "2g", - "size_resource": "memory", - }, - }, - }, - }, - }, - err: errors.New("apm topology: invalid instance_configuration_id: \"\" doesn't match any of the deployment template instance configurations"), - }, - { - name: "parses an APM resource with explicit topology and some config", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-apm", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\": \"value\"}", - "user_settings_override_json": "{\"some.setting\": \"value2\"}", - "debug_enabled": true, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - want: []*models.ApmPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("tertiary-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: 
value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(true), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - }}, - }, - }}, - }, - { - name: "parses an APM resource with explicit nils", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-apm", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": nil, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": nil, - "user_settings_override_yaml": nil, - "user_settings_json": nil, - "user_settings_override_json": nil, - "debug_enabled": nil, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - want: []*models.ApmPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("us-east-1"), - RefID: ec.String("tertiary-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - }}, - }, - }}, - }, - { - name: "tries to parse an apm resource when the template doesn't have an APM instance set.", - args: args{ - tpl: nil, - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-apm", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "4g", - 
"size_resource": "memory", - "zone_count": 1, - }}, - "config": []interface{}{map[string]interface{}{ - "debug_enabled": true, - }}, - }}, - }, - err: errors.New("apm specified but deployment template is not configured for it. Use a different template if you wish to add apm"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := expandApmResources(tt.args.ess, tt.args.tpl) - if !assert.Equal(t, tt.err, err) { - t.Error(err) - } - - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/apm_flatteners.go b/ec/ecresource/deploymentresource/apm_flatteners.go deleted file mode 100644 index c16c430da..000000000 --- a/ec/ecresource/deploymentresource/apm_flatteners.go +++ /dev/null @@ -1,154 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "bytes" - "encoding/json" - - "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenApmResources flattens apm resources into its flattened structure. 
-func flattenApmResources(in []*models.ApmResourceInfo, name string) []interface{} { - var result = make([]interface{}, 0, len(in)) - for _, res := range in { - var m = make(map[string]interface{}) - if util.IsCurrentApmPlanEmpty(res) || isApmResourceStopped(res) { - continue - } - - if res.RefID != nil && *res.RefID != "" { - m["ref_id"] = *res.RefID - } - - if res.Info.ID != nil && *res.Info.ID != "" { - m["resource_id"] = *res.Info.ID - } - - if res.Region != nil { - m["region"] = *res.Region - } - - plan := res.Info.PlanInfo.Current.Plan - if topology := flattenApmTopology(plan); len(topology) > 0 { - m["topology"] = topology - } - - if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID - } - - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } - - if cfg := flattenApmConfig(plan.Apm); len(cfg) > 0 { - m["config"] = cfg - } - - result = append(result, m) - } - - return result -} - -func flattenApmTopology(plan *models.ApmPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) - for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) - if topology.Size == nil || topology.Size.Value == nil || *topology.Size.Value == 0 { - continue - } - - if topology.InstanceConfigurationID != "" { - m["instance_configuration_id"] = topology.InstanceConfigurationID - } - - if topology.Size != nil { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource - } - - m["zone_count"] = topology.ZoneCount - - result = append(result, m) - } - - return result -} - -func flattenApmConfig(cfg *models.ApmConfiguration) []interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.UserSettingsYaml != "" { - m["user_settings_yaml"] = cfg.UserSettingsYaml - } - - if cfg.UserSettingsOverrideYaml != "" { - m["user_settings_override_yaml"] = 
cfg.UserSettingsOverrideYaml - } - - if o := cfg.UserSettingsJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_json"] = string(b) - } - } - - if o := cfg.UserSettingsOverrideJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_override_json"] = string(b) - } - } - - if cfg.DockerImage != "" { - m["docker_image"] = cfg.DockerImage - } - - for k, v := range flattenApmSystemConfig(cfg.SystemSettings) { - m[k] = v - } - - if len(m) == 0 { - return nil - } - - return []interface{}{m} -} - -func flattenApmSystemConfig(cfg *models.ApmSystemSettings) map[string]interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.DebugEnabled != nil { - m["debug_enabled"] = *cfg.DebugEnabled - } - - if len(m) == 0 { - return nil - } - - return m -} diff --git a/ec/ecresource/deploymentresource/create.go b/ec/ecresource/deploymentresource/create.go index e8690d32c..5930e3761 100644 --- a/ec/ecresource/deploymentresource/create.go +++ b/ec/ecresource/deploymentresource/create.go @@ -21,62 +21,76 @@ import ( "context" "fmt" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-log/tflog" ) -// createResource will createResource a new deployment from the specified settings. 
-func createResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) - reqID := deploymentapi.RequestID(d.Get("request_id").(string)) +func (r *Resource) Create(ctx context.Context, req resource.CreateRequest, resp *resource.CreateResponse) { + if !r.ready(&resp.Diagnostics) { + return + } - req, err := createResourceToModel(d, client) - if err != nil { - return diag.FromErr(err) + var config v2.DeploymentTF + diags := req.Config.Get(ctx, &config) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + var plan v2.DeploymentTF + diags = req.Plan.Get(ctx, &plan) + resp.Diagnostics.Append(diags...) + if resp.Diagnostics.HasError() { + return + } + + request, diags := plan.CreateRequest(ctx, r.client) + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return } + requestId := deploymentapi.RequestID(plan.RequestId.Value) + res, err := deploymentapi.Create(deploymentapi.CreateParams{ - API: client, - RequestID: reqID, - Request: req, + API: r.client, + RequestID: requestId, + Request: request, Overrides: &deploymentapi.PayloadOverrides{ - Name: d.Get("name").(string), - Version: d.Get("version").(string), - Region: d.Get("region").(string), + Name: plan.Name.Value, + Version: plan.Version.Value, + Region: plan.Region.Value, }, }) + if err != nil { - merr := multierror.NewPrefixed("failed creating deployment", err) - return diag.FromErr(merr.Append(newCreationError(reqID))) + resp.Diagnostics.AddError("failed creating deployment", err.Error()) + resp.Diagnostics.AddError("failed creating deployment", newCreationError(requestId).Error()) + return } - if err := WaitForPlanCompletion(client, *res.ID); err != nil { - merr := multierror.NewPrefixed("failed tracking create progress", err) - return diag.FromErr(merr.Append(newCreationError(reqID))) + if err := WaitForPlanCompletion(r.client, *res.ID); err != nil { + resp.Diagnostics.AddError("failed tracking create 
progress", newCreationError(requestId).Error()) + return } - d.SetId(*res.ID) + tflog.Trace(ctx, "created deployment resource") - // Since before the deployment has been read, there's no real state - // persisted, it'd better to handle each of the errors by appending - // it to the `diag.Diagnostics` since it has support for it. - var diags diag.Diagnostics - if err := handleRemoteClusters(d, client); err != nil { - diags = append(diags, diag.FromErr(err)...) - } + resp.Diagnostics.Append(v2.HandleRemoteClusters(ctx, r.client, *res.ID, plan.Elasticsearch)...) - if diag := readResource(ctx, d, meta); diag != nil { - diags = append(diags, diags...) - } + deployment, diags := r.read(ctx, *res.ID, nil, plan, res.Resources) + + resp.Diagnostics.Append(diags...) - if err := parseCredentials(d, res.Resources); err != nil { - diags = append(diags, diag.FromErr(err)...) + if deployment == nil { + resp.Diagnostics.AddError("cannot read just created resource", "") + resp.State.RemoveResource(ctx) + return } - return diags + resp.Diagnostics.Append(resp.State.Set(ctx, deployment)...) 
} func newCreationError(reqID string) error { diff --git a/ec/ecresource/deploymentresource/delete.go b/ec/ecresource/deploymentresource/delete.go index 8a8a63c97..beacc735c 100644 --- a/ec/ecresource/deploymentresource/delete.go +++ b/ec/ecresource/deploymentresource/delete.go @@ -20,82 +20,53 @@ package deploymentresource import ( "context" "errors" - "strings" - "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/client/deployments" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + "github.com/hashicorp/terraform-plugin-framework/resource" ) -// Delete shuts down and deletes the remote deployment retrying up to 3 times -// the Shutdown API call in case the plan returns with a failure that contains -// the "Timeout Exceeded" string, which is a fairly common transient error state -// returned from the API. 
-func deleteResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - const maxRetries = 3 - var retries int - timeout := d.Timeout(schema.TimeoutDelete) - client := meta.(*api.API) +func (r *Resource) Delete(ctx context.Context, req resource.DeleteRequest, resp *resource.DeleteResponse) { + if !r.ready(&resp.Diagnostics) { + return + } - return diag.FromErr(resource.RetryContext(ctx, timeout, func() *resource.RetryError { - if _, err := deploymentapi.Shutdown(deploymentapi.ShutdownParams{ - API: client, DeploymentID: d.Id(), - }); err != nil { - if alreadyDestroyed(err) { - d.SetId("") - return nil - } - return resource.NonRetryableError(multierror.NewPrefixed( - "failed shutting down the deployment", err, - )) - } + var state deploymentv2.DeploymentTF - if err := WaitForPlanCompletion(client, d.Id()); err != nil { - if shouldRetryShutdown(err, retries, maxRetries) { - retries++ - return resource.RetryableError(err) - } - return resource.NonRetryableError(err) - } + diags := req.State.Get(ctx, &state) + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + //TODO retries - if err := handleTrafficFilterChange(d, client); err != nil { - return resource.NonRetryableError(err) + if _, err := deploymentapi.Shutdown(deploymentapi.ShutdownParams{ + API: r.client, DeploymentID: state.Id.Value, + }); err != nil { + if alreadyDestroyed(err) { + return } + } - // We don't particularly care if delete succeeds or not. It's better to - // remove it, but it might fail on ESS. For example, when user's aren't - // allowed to delete deployments, or on ECE when the cluster is "still - // being shutdown". Sumarizing, even if the call fails the deployment - // won't be there. 
- _, _ = deploymentapi.Delete(deploymentapi.DeleteParams{ - API: client, DeploymentID: d.Id(), - }) + if err := WaitForPlanCompletion(r.client, state.Id.Value); err != nil { + resp.Diagnostics.AddError("deployment deletion error", err.Error()) + return + } - d.SetId("") - return nil - })) + // We don't particularly care if delete succeeds or not. It's better to + // remove it, but it might fail on ESS. For example, when users aren't + // allowed to delete deployments, or on ECE when the cluster is "still + // being shutdown". Summarizing, even if the call fails the deployment + // won't be there. + _, _ = deploymentapi.Delete(deploymentapi.DeleteParams{ + API: r.client, DeploymentID: state.Id.Value, + }) } func alreadyDestroyed(err error) bool { var destroyed *deployments.ShutdownDeploymentNotFound return errors.As(err, &destroyed) } - -func shouldRetryShutdown(err error, retries, maxRetries int) bool { - const timeout = "Timeout exceeded" - needsRetry := retries < maxRetries - - var isTimeout, isFailDeallocate bool - if err != nil { - isTimeout = strings.Contains(err.Error(), timeout) - isFailDeallocate = strings.Contains( - err.Error(), "Some instances were not stopped", - ) - } - return (needsRetry && isTimeout) || - (needsRetry && isFailDeallocate) -} diff --git a/ec/ecresource/deploymentresource/delete_test.go b/ec/ecresource/deploymentresource/delete_test.go deleted file mode 100644 index 7dba42c31..000000000 --- a/ec/ecresource/deploymentresource/delete_test.go +++ /dev/null @@ -1,213 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "context" - "errors" - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_deleteResource(t *testing.T) { - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - tc404Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC404 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC404.SetId("") - - type args struct { - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns an error when it receives a 500", - args: args{ - d: tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "failed 
shutting down the deployment: 1 error occurred:\n\t* api error: some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - { - name: "returns nil and unsets the state when the error is known", - args: args{ - d: tc404Err, - meta: api.NewMock(mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: nil, - wantRD: wantTC404, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := deleteResource(context.Background(), tt.args.d, tt.args.meta) - assert.Equal(t, tt.want, got) - var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} - -func Test_shouldRetryShutdown(t *testing.T) { - type args struct { - err error - retries int - maxRetries int - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "returns false when error doesn't contain timeout string", - args: args{ - err: errors.New("some error"), - retries: 1, - maxRetries: 10, - }, - want: false, - }, - { - name: "returns false when the error is nil", - args: args{ - retries: 1, - maxRetries: 10, - }, - want: false, - }, - { - name: "returns false when error doesn't contain timeout string", - args: args{ - err: errors.New("timeout exceeded"), - retries: 1, - maxRetries: 10, - }, - want: false, - }, - { - name: "returns true when error contains timeout string", - args: args{ - err: errors.New("Timeout exceeded"), - retries: 1, - maxRetries: 10, - }, - want: true, - }, - { - name: "returns true when error contains timeout string", - args: args{ - err: multierror.NewPrefixed("aa", - errors.New("Timeout exceeded"), - ), - retries: 1, - maxRetries: 10, - }, - want: true, - }, - { - name: "returns true when error contains a deallocation failure string", - args: args{ - err: multierror.NewPrefixed("aa", - 
errors.New(`deployment [8f3c85f97536163ad117a6d37b377120] - [elasticsearch][39dd873845bc43f9b3b21b87fe1a3c99]: caught error: "Plan change failed: Some instances were not stopped`), - ), - retries: 1, - maxRetries: 10, - }, - want: true, - }, - { - name: "returns false when error contains timeout string but exceeds max timeouts", - args: args{ - err: errors.New("Timeout exceeded"), - retries: 10, - maxRetries: 10, - }, - want: false, - }, - { - name: "returns false when error contains a deallocation failure string", - args: args{ - err: multierror.NewPrefixed("aa", - errors.New(`deployment [8f3c85f97536163ad117a6d37b377120] - [elasticsearch][39dd873845bc43f9b3b21b87fe1a3c99]: caught error: "Plan change failed: Some instances were not stopped`), - ), - retries: 10, - maxRetries: 10, - }, - want: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := shouldRetryShutdown(tt.args.err, tt.args.retries, tt.args.maxRetries) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/deployment/v1/deployment.go b/ec/ecresource/deploymentresource/deployment/v1/deployment.go new file mode 100644 index 000000000..68ace68dc --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v1/deployment.go @@ -0,0 +1,71 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" + + apmv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" + elasticsearchv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + enterprisesearchv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v1" + integrationsserverv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v1" + kibanav1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v1" + observabilityv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v1" +) + +type DeploymentTF struct { + Id types.String `tfsdk:"id"` + Alias types.String `tfsdk:"alias"` + Version types.String `tfsdk:"version"` + Region types.String `tfsdk:"region"` + DeploymentTemplateId types.String `tfsdk:"deployment_template_id"` + Name types.String `tfsdk:"name"` + RequestId types.String `tfsdk:"request_id"` + ElasticsearchUsername types.String `tfsdk:"elasticsearch_username"` + ElasticsearchPassword types.String `tfsdk:"elasticsearch_password"` + ApmSecretToken types.String `tfsdk:"apm_secret_token"` + TrafficFilter types.Set `tfsdk:"traffic_filter"` + Tags types.Map `tfsdk:"tags"` + Elasticsearch types.List `tfsdk:"elasticsearch"` + Kibana types.List `tfsdk:"kibana"` + Apm types.List `tfsdk:"apm"` + IntegrationsServer types.List `tfsdk:"integrations_server"` + EnterpriseSearch types.List `tfsdk:"enterprise_search"` + Observability types.List `tfsdk:"observability"` +} + +type Deployment struct { + Id string `tfsdk:"id"` + Alias string `tfsdk:"alias"` + Version string `tfsdk:"version"` + Region string `tfsdk:"region"` + DeploymentTemplateId string `tfsdk:"deployment_template_id"` + Name string `tfsdk:"name"` + 
RequestId string `tfsdk:"request_id"` + ElasticsearchUsername string `tfsdk:"elasticsearch_username"` + ElasticsearchPassword string `tfsdk:"elasticsearch_password"` + ApmSecretToken *string `tfsdk:"apm_secret_token"` + TrafficFilter []string `tfsdk:"traffic_filter"` + Tags map[string]string `tfsdk:"tags"` + Elasticsearch elasticsearchv1.Elasticsearches `tfsdk:"elasticsearch"` + Kibana kibanav1.Kibanas `tfsdk:"kibana"` + Apm apmv1.Apms `tfsdk:"apm"` + IntegrationsServer integrationsserverv1.IntegrationsServers `tfsdk:"integrations_server"` + EnterpriseSearch enterprisesearchv1.EnterpriseSearches `tfsdk:"enterprise_search"` + Observability observabilityv1.Observabilities `tfsdk:"observability"` +} diff --git a/ec/ecresource/deploymentresource/deployment/v1/schema.go b/ec/ecresource/deploymentresource/deployment/v1/schema.go new file mode 100644 index 000000000..441a165a3 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v1/schema.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + apmv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v1" + elasticsearchv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + enterprisesearchv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v1" + integrationsserverv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v1" + kibanav1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v1" + observabilityv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v1" +) + +func DeploymentSchema() tfsdk.Schema { + return tfsdk.Schema{ + Version: 1, + // This description is used by the documentation generator and the language server. + MarkdownDescription: "Elastic Cloud Deployment resource", + + Attributes: map[string]tfsdk.Attribute{ + "id": { + Type: types.StringType, + Computed: true, + MarkdownDescription: "Unique identifier of this resource.", + }, + "alias": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "version": { + Type: types.StringType, + Description: "Elastic Stack version to use for all of the deployment resources.", + Required: true, + }, + "region": { + Type: types.StringType, + Description: `Region when the deployment should be hosted. 
For ECE environments this should be set to "ece-region".`, + Required: true, + }, + "deployment_template_id": { + Type: types.StringType, + Description: "Deployment Template identifier to base the deployment from.", + Required: true, + }, + "name": { + Type: types.StringType, + Description: "Name for the deployment.", + Optional: true, + }, + "request_id": { + Type: types.StringType, + Description: "request_id to set on the create operation, only used when a previous create attempt returns an error including a request_id.", + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "elasticsearch_username": { + Type: types.StringType, + Description: "Username for authenticating to the Elasticsearch resource.", + Computed: true, + }, + "elasticsearch_password": { + Type: types.StringType, + Description: "Password for authenticating to the Elasticsearch resource", + Computed: true, + Sensitive: true, + }, + "apm_secret_token": { + Type: types.StringType, + Computed: true, + Sensitive: true, + }, + "traffic_filter": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + Description: "Optional list of traffic filters to apply to this deployment.", + }, + "tags": { + Description: "Optional map of deployment tags", + Type: types.MapType{ + ElemType: types.StringType, + }, + Optional: true, + }, + "elasticsearch": elasticsearchv1.ElasticsearchSchema(), + "kibana": kibanav1.KibanaSchema(), + "apm": apmv1.ApmSchema(), + "integrations_server": integrationsserverv1.IntegrationsServerSchema(), + "enterprise_search": enterprisesearchv1.EnterpriseSearchSchema(), + "observability": observabilityv1.ObservabilitySchema(), + }, + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload.go new file mode 100644 index 000000000..d62aeb4cd --- /dev/null +++ 
b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload.go @@ -0,0 +1,231 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deptemplateapi" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/esremoteclustersapi" + "github.com/elastic/cloud-sdk-go/pkg/models" + + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/hashicorp/terraform-plugin-framework/diag" + 
"github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type DeploymentTF struct { + Id types.String `tfsdk:"id"` + Alias types.String `tfsdk:"alias"` + Version types.String `tfsdk:"version"` + Region types.String `tfsdk:"region"` + DeploymentTemplateId types.String `tfsdk:"deployment_template_id"` + Name types.String `tfsdk:"name"` + RequestId types.String `tfsdk:"request_id"` + ElasticsearchUsername types.String `tfsdk:"elasticsearch_username"` + ElasticsearchPassword types.String `tfsdk:"elasticsearch_password"` + ApmSecretToken types.String `tfsdk:"apm_secret_token"` + TrafficFilter types.Set `tfsdk:"traffic_filter"` + Tags types.Map `tfsdk:"tags"` + Elasticsearch types.Object `tfsdk:"elasticsearch"` + Kibana types.Object `tfsdk:"kibana"` + Apm types.Object `tfsdk:"apm"` + IntegrationsServer types.Object `tfsdk:"integrations_server"` + EnterpriseSearch types.Object `tfsdk:"enterprise_search"` + Observability types.Object `tfsdk:"observability"` +} + +func (dep DeploymentTF) CreateRequest(ctx context.Context, client *api.API) (*models.DeploymentCreateRequest, diag.Diagnostics) { + var result = models.DeploymentCreateRequest{ + Name: dep.Name.Value, + Alias: dep.Alias.Value, + Resources: &models.DeploymentCreateResources{}, + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{}, + } + + dtID := dep.DeploymentTemplateId.Value + version := dep.Version.Value + + var diagsnostics diag.Diagnostics + + template, err := deptemplateapi.Get(deptemplateapi.GetParams{ + API: client, + TemplateID: dtID, + Region: dep.Region.Value, + HideInstanceConfigurations: true, + }) + if err != nil { + diagsnostics.AddError("Deployment template get error", err.Error()) + return nil, diagsnostics + } + + useNodeRoles, err := elasticsearchv2.CompatibleWithNodeRoles(version) + if err != nil { + diagsnostics.AddError("Deployment parse error", err.Error()) + return nil, diagsnostics + } + 
+ elasticsearchPayload, diags := elasticsearchv2.ElasticsearchPayload(ctx, dep.Elasticsearch, template, dtID, version, useNodeRoles, false) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if elasticsearchPayload != nil { + result.Resources.Elasticsearch = []*models.ElasticsearchPayload{elasticsearchPayload} + } + + kibanaPayload, diags := kibanav2.KibanaPayload(ctx, dep.Kibana, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if kibanaPayload != nil { + result.Resources.Kibana = []*models.KibanaPayload{kibanaPayload} + } + + apmPayload, diags := apmv2.ApmPayload(ctx, dep.Apm, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if apmPayload != nil { + result.Resources.Apm = []*models.ApmPayload{apmPayload} + } + + integrationsServerPayload, diags := integrationsserverv2.IntegrationsServerPayload(ctx, dep.IntegrationsServer, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if integrationsServerPayload != nil { + result.Resources.IntegrationsServer = []*models.IntegrationsServerPayload{integrationsServerPayload} + } + + enterpriseSearchPayload, diags := enterprisesearchv2.EnterpriseSearchesPayload(ctx, dep.EnterpriseSearch, template) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if enterpriseSearchPayload != nil { + result.Resources.EnterpriseSearch = []*models.EnterpriseSearchPayload{enterpriseSearchPayload} + } + + if diags := trafficFilterToModel(ctx, dep.TrafficFilter, &result); diags.HasError() { + diagsnostics.Append(diags...) + } + + observabilityPayload, diags := observabilityv2.ObservabilityPayload(ctx, dep.Observability, client) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + result.Settings.Observability = observabilityPayload + + result.Metadata.Tags, diags = converters.TypesMapToModelsTags(ctx, dep.Tags) + + if diags.HasError() { + diagsnostics.Append(diags...) 
+ } + + return &result, diagsnostics +} + +// trafficFilterToModel expands the flattened "traffic_filter" settings to a DeploymentCreateRequest. +func trafficFilterToModel(ctx context.Context, set types.Set, req *models.DeploymentCreateRequest) diag.Diagnostics { + if len(set.Elems) == 0 || req == nil { + return nil + } + + if req.Settings == nil { + req.Settings = &models.DeploymentCreateSettings{} + } + + if req.Settings.TrafficFilterSettings == nil { + req.Settings.TrafficFilterSettings = &models.TrafficFilterSettings{} + } + + var rulesets []string + if diags := tfsdk.ValueAs(ctx, set, &rulesets); diags.HasError() { + return diags + } + + req.Settings.TrafficFilterSettings.Rulesets = append( + req.Settings.TrafficFilterSettings.Rulesets, + rulesets..., + ) + + return nil +} + +func HandleRemoteClusters(ctx context.Context, client *api.API, deploymentId string, esObj types.Object) diag.Diagnostics { + remoteClusters, refId, diags := elasticsearchRemoteClustersPayload(ctx, client, deploymentId, esObj) + + if diags.HasError() { + return diags + } + + if err := esremoteclustersapi.Update(esremoteclustersapi.UpdateParams{ + API: client, + DeploymentID: deploymentId, + RefID: refId, + RemoteResources: remoteClusters, + }); err != nil { + diags.AddError("cannot update remote clusters", err.Error()) + return diags + } + + return nil +} + +func elasticsearchRemoteClustersPayload(ctx context.Context, client *api.API, deploymentId string, esObj types.Object) (*models.RemoteResources, string, diag.Diagnostics) { + var es *elasticsearchv2.ElasticsearchTF + + diags := tfsdk.ValueAs(ctx, esObj, &es) + + if diags.HasError() { + return nil, "", diags + } + + if es == nil { + var diags diag.Diagnostics + diags.AddError("failed create remote clusters payload", "there is no elasticsearch") + return nil, "", diags + } + + remoteRes, diags := elasticsearchv2.ElasticsearchRemoteClustersPayload(ctx, es.RemoteCluster) + if diags.HasError() { + return nil, "", diags + } + + return 
remoteRes, es.RefId.Value, nil +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go new file mode 100644 index 000000000..e601e0a35 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_create_payload_test.go @@ -0,0 +1,3104 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "bytes" + "context" + "io" + "os" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/stretchr/testify/assert" +) + +func fileAsResponseBody(t *testing.T, name string) io.ReadCloser { + t.Helper() + f, err := os.Open(name) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + var buf = new(bytes.Buffer) + if _, err := io.Copy(buf, f); err != nil { + t.Fatal(err) + } + buf.WriteString("\n") + + return io.NopCloser(buf) +} + +func Test_createRequest(t *testing.T) { + defaultHotTier := elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ) + + defaultElasticsearch := &elasticsearchv2.Elasticsearch{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *defaultHotTier, + }, + } + + sampleKibana := &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + ZoneCount: 1, + } + + sampleApm := &apmv2.Apm{ + 
ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &apmv2.ApmConfig{ + DebugEnabled: ec.Bool(false), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + ZoneCount: 1, + } + + sampleEnterpriseSearch := &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + } + + sampleObservability := &observabilityv2.Observability{ + DeploymentId: ec.String(mock.ValidClusterID), + RefId: ec.String("main-elasticsearch"), + Logs: true, + Metrics: true, + } + + sampleDeployment := Deployment{ + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-hot-warm-v2", + Region: "us-east-1", + Version: "7.11.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + ZoneCount: 1, + Autoscaling: 
&elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: sampleKibana, + Apm: sampleApm, + EnterpriseSearch: sampleEnterpriseSearch, + Observability: sampleObservability, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + } + + sampleElasticsearch := &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + } + + sampleLegacyDeployment := Deployment{ + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: sampleElasticsearch, + Kibana: sampleKibana, + Apm: sampleApm, + EnterpriseSearch: sampleEnterpriseSearch, + Observability: sampleObservability, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + } + + ioOptimizedTpl := func() io.ReadCloser { + return fileAsResponseBody(t, 
"../../testdata/template-aws-io-optimized-v2.json") + } + + hotWarmTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-aws-hot-warm-v2.json") + } + + ccsTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-aws-cross-cluster-search-v2.json") + } + + emptyTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-empty.json") + } + + type args struct { + plan Deployment + client *api.API + } + tests := []struct { + name string + args args + want *models.DeploymentCreateRequest + diags diag.Diagnostics + }{ + { + name: "parses the resources", + args: args{ + plan: sampleDeployment, + client: api.NewMock( + mock.New200Response(hotWarmTpl()), + mock.New200Response( + mock.NewStructBody(models.DeploymentGetResponse{ + Healthy: ec.Bool(true), + ID: ec.String(mock.ValidClusterID), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + ID: ec.String(mock.ValidClusterID), + RefID: ec.String("main-elasticsearch"), + }}, + }, + }), + ), + ), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + Settings: &models.DeploymentCreateSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + Observability: &models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + Metrics: &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: 
[]*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.11.1", + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-hot-warm-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d2", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "warm"}, + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + TopologyElementControl: 
&models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(false), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the 
legacy resources", + args: args{ + plan: sampleLegacyDeployment, + client: api.NewMock( + mock.New200Response(ioOptimizedTpl()), + mock.New200Response( + mock.NewStructBody(models.DeploymentGetResponse{ + Healthy: ec.Bool(true), + ID: ec.String(mock.ValidClusterID), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + ID: ec.String(mock.ValidClusterID), + RefID: ec.String("main-elasticsearch"), + }}, + }, + }), + ), + ), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + Settings: &models.DeploymentCreateSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + Observability: &models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + Metrics: &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: 
map[string]interface{}{ + "some.setting": "value2", + }, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(false), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: 
[]*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (IO Optimized)", + args: args{ + plan: Deployment{ + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + Apm: &apmv2.Apm{}, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{}, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + // Ref ids are taken from template, not from defaults values in this test. + // Defaults are processed by TF during config processing. 
+ want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: 
[]*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("apm-ref-id"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("enterprise_search-ref-id"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (IO Optimized) with node_roles", + args: args{ + plan: Deployment{ + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.11.0", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + Apm: &apmv2.Apm{}, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{}, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{ 
+ TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + // Ref ids are taken from template, not from defaults values in this test. + // Defaults are processed by TF during config processing. + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.11.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + 
ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("apm-ref-id"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("enterprise_search-ref-id"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with topology overrides (size)", + args: args{ + + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + 
}, + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: ec.String("2g"), + }, + Apm: &apmv2.Apm{ + RefId: ec.String("main-apm"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: ec.String("1g"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: ec.String("4g"), + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + Settings: &models.DeploymentCreateSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + 
NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + 
}, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with topology overrides (IC)", + args: args{ + + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *defaultHotTier, + }, + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + }, + Apm: &apmv2.Apm{ + RefId: ec.String("main-apm"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + InstanceConfigurationId: ec.String("aws.apm.r5d"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + Settings: &models.DeploymentCreateSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: 
&models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + 
ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (Hot Warm)", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-hot-warm-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + }, + client: api.NewMock(mock.New200Response(hotWarmTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, hotWarmTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + Curation: nil, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Curation: nil, + Version: "7.9.2", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-hot-warm-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + 
ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d2", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(false), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (Hot Warm) with node_roles", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + 
Name: "my_deployment_name", + DeploymentTemplateId: "aws-hot-warm-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + }, + client: api.NewMock(mock.New200Response(hotWarmTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + Curation: nil, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Curation: nil, + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-hot-warm-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d2", + Size: &models.TopologySize{ + 
Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (Hot Warm) with node_roles and extensions", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-hot-warm-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *defaultHotTier, + }, + Extension: elasticsearchv2.ElasticsearchExtensions{ + { + Name: "my-plugin", + Type: "plugin", + Url: "repo://12311234", + Version: "7.7.0", + }, + { + Name: "my-second-plugin", + Type: "plugin", + Url: "repo://12311235", + Version: "7.7.0", + }, + { + Name: "my-bundle", + Type: "bundle", + Url: "repo://1231122", + Version: "7.7.0", + }, + { + Name: "my-second-bundle", + Type: "bundle", + Url: "repo://1231123", + Version: "7.7.0", + }, + }, + }, + }, + client: 
api.NewMock(mock.New200Response(hotWarmTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + UserBundles: []*models.ElasticsearchUserBundle{ + { + URL: ec.String("repo://1231122"), + Name: ec.String("my-bundle"), + ElasticsearchVersion: ec.String("7.7.0"), + }, + { + URL: ec.String("repo://1231123"), + Name: ec.String("my-second-bundle"), + ElasticsearchVersion: ec.String("7.7.0"), + }, + }, + UserPlugins: []*models.ElasticsearchUserPlugin{ + { + URL: ec.String("repo://12311234"), + Name: ec.String("my-plugin"), + ElasticsearchVersion: ec.String("7.7.0"), + }, + { + URL: ec.String("repo://12311235"), + Name: ec.String("my-second-plugin"), + ElasticsearchVersion: ec.String("7.7.0"), + }, + }, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-hot-warm-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + 
TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d2", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with autoscaling enabled", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Autoscale: ec.Bool(true), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + 
}, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(true), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: 
&models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with autoscaling enabled and custom policies set", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Autoscale: ec.Bool(true), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("232g"), + }, + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("116g"), + }, + }), + }, + }, + 
}, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(true), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + 
NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(237568), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with dedicated master and cold tiers", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "master": 
*elasticsearchv2.CreateTierForTest("master", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("1g"), + ZoneCount: 3, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: 
ec.Int32(8192), + }, + NodeRoles: []string{ + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "master", + ZoneCount: 3, + InstanceConfigurationID: "aws.master.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + NodeRoles: []string{ + "master", + "remote_cluster_client", + }, + // Elasticsearch: &models.ElasticsearchConfiguration{}, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with dedicated coordinating and cold tiers", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: 
elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: 
ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "coordinating", + ZoneCount: 2, + InstanceConfigurationID: "aws.coordinating.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "ingest", + "remote_cluster_client", + }, + // Elasticsearch: &models.ElasticsearchConfiguration{}, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", 
+ }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with dedicated coordinating, master and cold tiers", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "master": *elasticsearchv2.CreateTierForTest("master", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("1g"), + ZoneCount: 3, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: 
[]*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "coordinating", + ZoneCount: 2, + InstanceConfigurationID: "aws.coordinating.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "ingest", + "remote_cluster_client", + }, + // Elasticsearch: &models.ElasticsearchConfiguration{}, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: 
ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "master", + ZoneCount: 3, + InstanceConfigurationID: "aws.master.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + NodeRoles: []string{ + "master", + "remote_cluster_client", + }, + // Elasticsearch: &models.ElasticsearchConfiguration{}, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "deployment with docker_image overrides", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.14.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Config: 
&elasticsearchv2.ElasticsearchConfig{ + DockerImage: ec.String("docker.elastic.com/elasticsearch/container:7.14.1-hash"), + }, + Autoscale: ec.Bool(false), + TrustAccount: elasticsearchv2.ElasticsearchTrustAccounts{ + { + AccountId: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + }, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &kibanav2.KibanaConfig{ + DockerImage: ec.String("docker.elastic.com/kibana/container:7.14.1-hash"), + }, + }, + Apm: &apmv2.Apm{ + RefId: ec.String("main-apm"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &apmv2.ApmConfig{ + DockerImage: ec.String("docker.elastic.com/apm/container:7.14.1-hash"), + }, + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &enterprisesearchv2.EnterpriseSearchConfig{ + DockerImage: ec.String("docker.elastic.com/enterprise_search/container:7.14.1-hash"), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + 
DedicatedMastersThreshold: 6, + Trust: &models.ElasticsearchClusterTrustSettings{ + Accounts: []*models.AccountTrustRelationship{ + { + AccountID: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + }, + }, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.14.1", + DockerImage: "docker.elastic.com/elasticsearch/container:7.14.1-hash", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Apm: []*models.ApmPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + DockerImage: "docker.elastic.com/apm/container:7.14.1-hash", + // SystemSettings: &models.ApmSystemSettings{ + // DebugEnabled: ec.Bool(false), + // }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + ZoneCount: 1, + }}, + }, + RefID: ec.String("main-apm"), + Region: ec.String("us-east-1"), + }}, + Kibana: []*models.KibanaPayload{{ + ElasticsearchClusterRefID: 
ec.String("main-elasticsearch"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{ + DockerImage: "docker.elastic.com/kibana/container:7.14.1-hash", + }, + ClusterTopology: []*models.KibanaClusterTopologyElement{{ + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + ZoneCount: 1, + }}, + }, + RefID: ec.String("main-kibana"), + Region: ec.String("us-east-1"), + }}, + EnterpriseSearch: []*models.EnterpriseSearchPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{ + DockerImage: "docker.elastic.com/enterprise_search/container:7.14.1-hash", + }, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + ZoneCount: 2, + }}, + }, + RefID: ec.String("main-enterprise_search"), + Region: ec.String("us-east-1"), + }}, + }, + }, + }, + + { + name: "deployment with trust settings set", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Autoscale: ec.Bool(false), + TrustAccount: elasticsearchv2.ElasticsearchTrustAccounts{ + { + AccountId: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + { + AccountId: ec.String("anotherID"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{"abc", "hij", "dfg"}, + }, + }, + TrustExternal: elasticsearchv2.ElasticsearchTrustExternals{ + { + RelationshipId: ec.String("external_id"), + TrustAll: ec.Bool(true), + }, 
+ { + RelationshipId: ec.String("another_external_id"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{"abc", "dfg"}, + }, + }, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("232g"), + }, + }), + "cold": *elasticsearchv2.CreateTierForTest("cold", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("2g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("116g"), + }, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + Trust: &models.ElasticsearchClusterTrustSettings{ + Accounts: []*models.AccountTrustRelationship{ + { + AccountID: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + { + AccountID: ec.String("anotherID"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{ + "abc", "hij", "dfg", + }, + }, + }, + External: []*models.ExternalTrustRelationship{ + { + TrustRelationshipID: ec.String("external_id"), + TrustAll: ec.Bool(true), + }, + { + TrustRelationshipID: 
ec.String("another_external_id"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{ + "abc", "dfg", + }, + }, + }, + }, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "cold", + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeRoles: []string{ + "data_cold", + "remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "cold", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(59392), + Resource: ec.String("memory"), + }, + }, + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(237568), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeRoles: []string{ + "data_warm", + 
"remote_cluster_client", + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{ + "data": "warm", + }, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "parses the resources with empty declarations (Cross Cluster Search)", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-cross-cluster-search-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *defaultHotTier, + }, + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + }, + }, + client: api.NewMock(mock.New200Response(ccsTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ccsTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{}, + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.9.2", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-cross-cluster-search-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 1, + 
InstanceConfigurationID: "aws.ccs.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with tags", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Tags: map[string]string{ + "aaa": "bbb", + "owner": "elastic", + "cost-center": "rnd", + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{Tags: []*models.MetadataItem{ + {Key: ec.String("aaa"), Value: ec.String("bbb")}, + {Key: ec.String("cost-center"), Value: ec.String("rnd")}, + 
{Key: ec.String("owner"), Value: ec.String("elastic")}, + }}, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + { + name: "handles a snapshot_source block, leaving the strategy as is", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + SnapshotSource: &elasticsearchv2.ElasticsearchSnapshotSource{ + SourceElasticsearchClusterId: "8c63b87af9e24ea49b8a4bfe550e5fe9", + }, + Topology: 
elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentCreateRequest{ + Name: "my_deployment_name", + Settings: &models.DeploymentCreateSettings{}, + Metadata: &models.DeploymentCreateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentCreateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{ + RestoreSnapshot: &models.RestoreSnapshotConfiguration{ + SourceClusterID: "8c63b87af9e24ea49b8a4bfe550e5fe9", + SnapshotName: ec.String(""), + }, + }, + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: 
ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + // This case we're using an empty deployment_template to ensure that + // resources not present in the template cannot be expanded, receiving + // an error instead. + { + name: "parses the resources with empty explicit declarations (Empty deployment template)", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + Apm: &apmv2.Apm{}, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{}, + }, + client: api.NewMock(mock.New200Response(emptyTpl())), + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("topology matching error", "invalid id ('hot_content'): valid topology IDs are ") + diags.AddError("kibana payload error", "kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana") + diags.AddError("apm payload error", "apm specified but deployment template is not configured for it. Use a different template if you wish to add apm") + diags.AddError("enterprise_search payload error", "enterprise_search specified but deployment template is not configured for it. 
Use a different template if you wish to add enterprise_search") + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schema := DeploymentSchema() + + var plan DeploymentTF + diags := tfsdk.ValueFrom(context.Background(), &tt.args.plan, schema.Type(), &plan) + assert.Nil(t, diags) + + got, diags := plan.CreateRequest(context.Background(), tt.args.client) + if tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.NotNil(t, got) + assert.Equal(t, *tt.want, *got) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_parse_credentials_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_parse_credentials_test.go new file mode 100644 index 000000000..e7d197889 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_parse_credentials_test.go @@ -0,0 +1,81 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/stretchr/testify/assert" +) + +func Test_parseCredentials(t *testing.T) { + type args struct { + dep Deployment + resources []*models.DeploymentResource + } + tests := []struct { + name string + args args + want Deployment + }{ + { + name: "Parses credentials", + args: args{ + dep: Deployment{}, + resources: []*models.DeploymentResource{{ + Credentials: &models.ClusterCredentials{ + Username: ec.String("my-username"), + Password: ec.String("my-password"), + }, + SecretToken: "some-secret-token", + }}, + }, + want: Deployment{ + ElasticsearchUsername: "my-username", + ElasticsearchPassword: "my-password", + ApmSecretToken: ec.String("some-secret-token"), + }, + }, + { + name: "when no credentials are passed, it doesn't overwrite them", + args: args{ + dep: Deployment{ + ElasticsearchUsername: "my-username", + ElasticsearchPassword: "my-password", + ApmSecretToken: ec.String("some-secret-token"), + }, + resources: []*models.DeploymentResource{ + {}, + }, + }, + want: Deployment{ + ElasticsearchUsername: "my-username", + ElasticsearchPassword: "my-password", + ApmSecretToken: ec.String("some-secret-token"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tt.args.dep.parseCredentials(tt.args.resources) + assert.Equal(t, tt.want, tt.args.dep) + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go new file mode 100644 index 000000000..fa8ecd8cc --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read.go @@ -0,0 +1,366 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "errors" + "fmt" + "strings" + + "github.com/blang/semver" + "github.com/elastic/cloud-sdk-go/pkg/models" + + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +type Deployment struct { + Id string `tfsdk:"id"` + Alias string `tfsdk:"alias"` + Version string `tfsdk:"version"` + Region string `tfsdk:"region"` + DeploymentTemplateId string `tfsdk:"deployment_template_id"` + Name string `tfsdk:"name"` + RequestId string `tfsdk:"request_id"` + ElasticsearchUsername string 
`tfsdk:"elasticsearch_username"` + ElasticsearchPassword string `tfsdk:"elasticsearch_password"` + ApmSecretToken *string `tfsdk:"apm_secret_token"` + TrafficFilter []string `tfsdk:"traffic_filter"` + Tags map[string]string `tfsdk:"tags"` + Elasticsearch *elasticsearchv2.Elasticsearch `tfsdk:"elasticsearch"` + Kibana *kibanav2.Kibana `tfsdk:"kibana"` + Apm *apmv2.Apm `tfsdk:"apm"` + IntegrationsServer *integrationsserverv2.IntegrationsServer `tfsdk:"integrations_server"` + EnterpriseSearch *enterprisesearchv2.EnterpriseSearch `tfsdk:"enterprise_search"` + Observability *observabilityv2.Observability `tfsdk:"observability"` +} + +// Nullify Elasticsearch topologies that have zero size and are not specified in plan +func (dep *Deployment) NullifyUnusedEsTopologies(ctx context.Context, esPlan *elasticsearchv2.ElasticsearchTF) diag.Diagnostics { + if dep.Elasticsearch == nil { + return nil + } + + if esPlan == nil { + return nil + } + + var planTopology elasticsearchv2.ElasticsearchTopologiesTF + if diags := esPlan.Topology.ElementsAs(ctx, &planTopology, true); diags.HasError() { + return diags + } + + filteredTopologies := make(elasticsearchv2.ElasticsearchTopologies, len(dep.Elasticsearch.Topology)) + + for id, tier := range dep.Elasticsearch.Topology { + _, exist := planTopology[id] + + size, err := converters.ParseTopologySize(tier.Size, tier.SizeResource) + + if err != nil { + var diags diag.Diagnostics + diags.AddError("Cannot remove unused Elasticsearch topologies from backend response", err.Error()) + return diags + } + + if size == nil || size.Value == nil { + var diags diag.Diagnostics + diags.AddError("Cannot remove unused Elasticsearch topologies from backend response", fmt.Sprintf("the topology [%s] size is nil", id)) + return diags + } + + if !exist && *size.Value == 0 { + continue + } + + filteredTopologies[id] = tier + } + + dep.Elasticsearch.Topology = filteredTopologies + + return nil +} + +func ReadDeployment(res *models.DeploymentGetResponse, 
remotes *models.RemoteResources, deploymentResources []*models.DeploymentResource) (*Deployment, error) { + var dep Deployment + + if res.ID == nil { + return nil, utils.MissingField("ID") + } + dep.Id = *res.ID + + dep.Alias = res.Alias + + if res.Name == nil { + return nil, utils.MissingField("Name") + } + dep.Name = *res.Name + + if res.Metadata != nil { + dep.Tags = converters.ModelsTagsToMap(res.Metadata.Tags) + } + + if res.Resources == nil { + return nil, nil + } + + templateID, err := getDeploymentTemplateID(res.Resources) + if err != nil { + return nil, err + } + + dep.DeploymentTemplateId = templateID + + dep.Region = getRegion(res.Resources) + + // We're reconciling the version and storing the lowest version of any + // of the deployment resources. This ensures that if an upgrade fails, + // the state version will be lower than the desired version, making + // retries possible. Once more resource types are added, the function + // needs to be modified to check those as well. + version, err := getLowestVersion(res.Resources) + if err != nil { + // This code path is highly unlikely, but we're bubbling up the + // error in case one of the versions isn't parseable by semver. 
+ return nil, fmt.Errorf("failed reading deployment: %w", err) + } + dep.Version = version + + dep.Elasticsearch, err = elasticsearchv2.ReadElasticsearches(res.Resources.Elasticsearch, remotes) + if err != nil { + return nil, err + } + + if dep.Kibana, err = kibanav2.ReadKibanas(res.Resources.Kibana); err != nil { + return nil, err + } + + if dep.Apm, err = apmv2.ReadApms(res.Resources.Apm); err != nil { + return nil, err + } + + if dep.IntegrationsServer, err = integrationsserverv2.ReadIntegrationsServers(res.Resources.IntegrationsServer); err != nil { + return nil, err + } + + if dep.EnterpriseSearch, err = enterprisesearchv2.ReadEnterpriseSearches(res.Resources.EnterpriseSearch); err != nil { + return nil, err + } + + if dep.TrafficFilter, err = readTrafficFilters(res.Settings); err != nil { + return nil, err + } + + if dep.Observability, err = observabilityv2.ReadObservability(res.Settings); err != nil { + return nil, err + } + + dep.parseCredentials(deploymentResources) + + return &dep, nil +} + +func readTrafficFilters(in *models.DeploymentSettings) ([]string, error) { + if in == nil || in.TrafficFilterSettings == nil || len(in.TrafficFilterSettings.Rulesets) == 0 { + return nil, nil + } + + var rules []string + + return append(rules, in.TrafficFilterSettings.Rulesets...), nil +} + +// parseCredentials parses the Create or Update response Resources populating +// credential settings in the Terraform state if the keys are found, currently +// populates the following credentials in plain text: +// * Elasticsearch username and Password +func (dep *Deployment) parseCredentials(resources []*models.DeploymentResource) { + for _, res := range resources { + + if creds := res.Credentials; creds != nil { + if creds.Username != nil && *creds.Username != "" { + dep.ElasticsearchUsername = *creds.Username + } + + if creds.Password != nil && *creds.Password != "" { + dep.ElasticsearchPassword = *creds.Password + } + } + + if res.SecretToken != "" { + dep.ApmSecretToken = 
&res.SecretToken + } + } +} + +func (dep *Deployment) ProcessSelfInObservability() { + + if dep.Observability == nil { + return + } + + if dep.Observability.DeploymentId == nil { + return + } + + if *dep.Observability.DeploymentId == dep.Id { + *dep.Observability.DeploymentId = "self" + } +} + +func (dep *Deployment) SetCredentialsIfEmpty(state *DeploymentTF) { + if state == nil { + return + } + + if dep.ElasticsearchPassword == "" && state.ElasticsearchPassword.Value != "" { + dep.ElasticsearchPassword = state.ElasticsearchPassword.Value + } + + if dep.ElasticsearchUsername == "" && state.ElasticsearchUsername.Value != "" { + dep.ElasticsearchUsername = state.ElasticsearchUsername.Value + } + + if (dep.ApmSecretToken == nil || *dep.ApmSecretToken == "") && state.ApmSecretToken.Value != "" { + dep.ApmSecretToken = &state.ApmSecretToken.Value + } +} + +func getLowestVersion(res *models.DeploymentResources) (string, error) { + // We're starting off with a very high version so it can be replaced. 
+ replaceVersion := `99.99.99` + version := semver.MustParse(replaceVersion) + for _, r := range res.Elasticsearch { + if !util.IsCurrentEsPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.Elasticsearch.Version + if err := swapLowerVersion(&version, v); err != nil && !elasticsearchv2.IsElasticsearchStopped(r) { + return "", fmt.Errorf("elasticsearch version '%s' is not semver compliant: %w", v, err) + } + } + } + + for _, r := range res.Kibana { + if !util.IsCurrentKibanaPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.Kibana.Version + if err := swapLowerVersion(&version, v); err != nil && !kibanav2.IsKibanaStopped(r) { + return version.String(), fmt.Errorf("kibana version '%s' is not semver compliant: %w", v, err) + } + } + } + + for _, r := range res.Apm { + if !util.IsCurrentApmPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.Apm.Version + if err := swapLowerVersion(&version, v); err != nil && !apmv2.IsApmStopped(r) { + return version.String(), fmt.Errorf("apm version '%s' is not semver compliant: %w", v, err) + } + } + } + + for _, r := range res.IntegrationsServer { + if !util.IsCurrentIntegrationsServerPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.IntegrationsServer.Version + if err := swapLowerVersion(&version, v); err != nil && !integrationsserverv2.IsIntegrationsServerStopped(r) { + return version.String(), fmt.Errorf("integrations_server version '%s' is not semver compliant: %w", v, err) + } + } + } + + for _, r := range res.EnterpriseSearch { + if !util.IsCurrentEssPlanEmpty(r) { + v := r.Info.PlanInfo.Current.Plan.EnterpriseSearch.Version + if err := swapLowerVersion(&version, v); err != nil && !enterprisesearchv2.IsEnterpriseSearchStopped(r) { + return version.String(), fmt.Errorf("enterprise search version '%s' is not semver compliant: %w", v, err) + } + } + } + + if version.String() != replaceVersion { + return version.String(), nil + } + return "", errors.New("unable to determine the lowest version for any the deployment components") +} + 
+func swapLowerVersion(version *semver.Version, comp string) error { + if comp == "" { + return nil + } + + v, err := semver.Parse(comp) + if err != nil { + return err + } + if v.LT(*version) { + *version = v + } + return nil +} + +func getRegion(res *models.DeploymentResources) string { + for _, r := range res.Elasticsearch { + if r.Region != nil && *r.Region != "" { + return *r.Region + } + } + + return "" +} + +func getDeploymentTemplateID(res *models.DeploymentResources) (string, error) { + var deploymentTemplateID string + var foundTemplates []string + for _, esRes := range res.Elasticsearch { + if util.IsCurrentEsPlanEmpty(esRes) { + continue + } + + var emptyDT = esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate == nil + if emptyDT { + continue + } + + if deploymentTemplateID == "" { + deploymentTemplateID = *esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID + } + + foundTemplates = append(foundTemplates, + *esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID, + ) + } + + if deploymentTemplateID == "" { + return "", errors.New("failed to obtain the deployment template id") + } + + if len(foundTemplates) > 1 { + return "", fmt.Errorf( + "there are more than 1 deployment templates specified on the deployment: \"%s\"", strings.Join(foundTemplates, ", "), + ) + } + + return deploymentTemplateID, nil +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go new file mode 100644 index 000000000..c57b75abb --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_read_test.go @@ -0,0 +1,1633 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "errors" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/stretchr/testify/assert" +) + +func Test_readDeployment(t *testing.T) { + type args struct { + res *models.DeploymentGetResponse + remotes models.RemoteResources + } + tests := []struct { + name string + args args + want Deployment + err error + }{ + { + name: "flattens deployment resources", + want: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("us-east-1"), + Config: 
&elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &apmv2.ApmConfig{ + DebugEnabled: ec.Bool(false), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + 
NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + }, + Observability: &observabilityv2.Observability{ + DeploymentId: ec.String(mock.ValidClusterID), + RefId: ec.String("main-elasticsearch"), + Logs: true, + Metrics: true, + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + args: args{ + res: &models.DeploymentGetResponse{ + ID: &mock.ValidClusterID, + Alias: "my-deployment", + Name: ec.String("my_deployment_name"), + Settings: &models.DeploymentSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + Observability: &models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + Metrics: &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }, + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("started"), + ClusterID: &mock.ValidClusterID, + ClusterName: ec.String("some-name"), + Region: "us-east-1", + ElasticsearchMonitoringInfo: &models.ElasticsearchMonitoringInfo{ + DestinationClusterIds: []string{"some"}, + }, + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + }, + 
DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }}, + }, + }, + }, + }, + }, + }, + Kibana: []*models.KibanaResourceInfo{ + { + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Info: &models.KibanaClusterInfo{ + Status: ec.String("started"), + ClusterID: &mock.ValidClusterID, + ClusterName: ec.String("some-kibana-name"), + Region: "us-east-1", + PlanInfo: &models.KibanaClusterPlansInfo{ + Current: &models.KibanaClusterPlanInfo{ + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{ + Version: "7.7.0", + }, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmResourceInfo{{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Info: &models.ApmInfo{ + Status: ec.String("started"), + ID: &mock.ValidClusterID, + Name: ec.String("some-apm-name"), + Region: "us-east-1", + PlanInfo: &models.ApmPlansInfo{ + Current: &models.ApmPlanInfo{ + Plan: &models.ApmPlan{ + Apm: 
&models.ApmConfiguration{ + Version: "7.7.0", + SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(false), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + }, + }}, + EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ + { + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Info: &models.EnterpriseSearchInfo{ + Status: ec.String("started"), + ID: &mock.ValidClusterID, + Name: ec.String("some-enterprise_search-name"), + Region: "us-east-1", + PlanInfo: &models.EnterpriseSearchPlansInfo{ + Current: &models.EnterpriseSearchPlanInfo{ + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{ + Version: "7.7.0", + }, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "sets the global version to the lesser version", + args: args{ + res: &models.DeploymentGetResponse{ + ID: &mock.ValidClusterID, + Alias: "my-deployment", + Name: ec.String("my_deployment_name"), + Settings: &models.DeploymentSettings{ + TrafficFilterSettings: &models.TrafficFilterSettings{ + Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("started"), 
+ ClusterID: &mock.ValidClusterID, + ClusterName: ec.String("some-name"), + Region: "us-east-1", + ElasticsearchMonitoringInfo: &models.ElasticsearchMonitoringInfo{ + DestinationClusterIds: []string{"some"}, + }, + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }}, + }, + }, + }, + }, + }, + }, + Kibana: []*models.KibanaResourceInfo{ + { + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Info: &models.KibanaClusterInfo{ + Status: ec.String("started"), + ClusterID: &mock.ValidClusterID, + ClusterName: ec.String("some-kibana-name"), + Region: "us-east-1", + PlanInfo: &models.KibanaClusterPlansInfo{ + Current: &models.KibanaClusterPlanInfo{ + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{ + Version: 
"7.6.2", + }, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + want: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.6.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("us-east-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + }, + + { + name: "flattens an azure plan (io-optimized)", + args: args{ + res: deploymentGetResponseFromFile(t, 
"../../testdata/deployment-azure-io-optimized.json"), + }, + want: Deployment{ + Id: "123e79d8109c4a0790b0b333110bf715", + Alias: "my-deployment", + Name: "up2d", + DeploymentTemplateId: "azure-io-optimized", + Region: "azure-eastus2", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("1238f19957874af69306787dca662154"), + Region: ec.String("azure-eastus2"), + Autoscale: ec.Bool(false), + CloudID: ec.String("up2d:somecloudID"), + HttpEndpoint: ec.String("http://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9243"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("azure.data.highio.l32sv2"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + Config: &elasticsearchv2.ElasticsearchConfig{}, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("1235cd4a4c7f464bbcfd795f3638b769"), + Region: ec.String("azure-eastus2"), + HttpEndpoint: ec.String("http://1235cd4a4c7f464bbcfd795f3638b769.eastus2.azure.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://1235cd4a4c7f464bbcfd795f3638b769.eastus2.azure.elastic-cloud.com:9243"), + InstanceConfigurationId: ec.String("azure.kibana.e32sv3"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: 
ec.String("main-apm"), + ResourceId: ec.String("1235d8c911b74dd6a03c2a7b37fd68ab"), + Region: ec.String("azure-eastus2"), + HttpEndpoint: ec.String("http://1235d8c911b74dd6a03c2a7b37fd68ab.apm.eastus2.azure.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://1235d8c911b74dd6a03c2a7b37fd68ab.apm.eastus2.azure.elastic-cloud.com:443"), + InstanceConfigurationId: ec.String("azure.apm.e32sv3"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens an aws plan (io-optimized)", + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-aws-io-optimized.json")}, + want: Deployment{ + Id: "123365f2805e46808d40849b1c0b266b", + Alias: "my-deployment", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("1239f7ee7196439ba2d105319ac5eba7"), + Region: ec.String("aws-eu-central-1"), + Autoscale: ec.Bool(false), + CloudID: ec.String("up2d:someCloudID"), + HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("8g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + 
RefId: ec.String("main-kibana"), + ResourceId: ec.String("123dcfda06254ca789eb287e8b73ff4c"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("12328579b3bf40c8b58c1a0ed5a4bd8b"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80"), + HttpsEndpoint: ec.String("https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443"), + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens an aws plan with extensions (io-optimized)", + args: args{ + res: deploymentGetResponseFromFile(t, "../../testdata/deployment-aws-io-optimized-extension.json"), + }, + want: Deployment{ + Id: "123365f2805e46808d40849b1c0b266b", + Alias: "my-deployment", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("1239f7ee7196439ba2d105319ac5eba7"), + Region: ec.String("aws-eu-central-1"), + Autoscale: ec.Bool(false), + CloudID: ec.String("up2d:someCloudID"), + HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + Topology: 
elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("8g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + Extension: elasticsearchv2.ElasticsearchExtensions{ + { + Name: "custom-bundle", + Version: "7.9.2", + Url: "http://12345", + Type: "bundle", + }, + { + Name: "custom-bundle2", + Version: "7.9.2", + Url: "http://123456", + Type: "bundle", + }, + { + Name: "custom-plugin", + Version: "7.9.2", + Url: "http://12345", + Type: "plugin", + }, + { + Name: "custom-plugin2", + Version: "7.9.2", + Url: "http://123456", + Type: "plugin", + }, + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("123dcfda06254ca789eb287e8b73ff4c"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("12328579b3bf40c8b58c1a0ed5a4bd8b"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80"), + HttpsEndpoint: ec.String("https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443"), + InstanceConfigurationId: 
ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens an aws plan with trusts", + args: args{ + res: &models.DeploymentGetResponse{ + ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), + Alias: "OH", + Name: ec.String("up2d"), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + RefID: ec.String("main-elasticsearch"), + Region: ec.String("aws-eu-central-1"), + Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("running"), + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.13.1", + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Size: &models.TopologySize{ + Value: ec.Int32(4096), + Resource: ec.String("memory"), + }, + }}, + }, + }, + }, + Settings: &models.ElasticsearchClusterSettings{ + Trust: &models.ElasticsearchClusterTrustSettings{ + Accounts: []*models.AccountTrustRelationship{ + { + AccountID: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + { + AccountID: ec.String("anotherID"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{ + "abc", "dfg", "hij", + }, + }, + }, + External: []*models.ExternalTrustRelationship{ + { + TrustRelationshipID: ec.String("external_id"), + TrustAll: ec.Bool(true), + }, + { + TrustRelationshipID: ec.String("another_external_id"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{ + "abc", "dfg", + }, + }, + }, + }, + }, + }, + }}, + }, + }, + }, + want: Deployment{ + Id: "123b7b540dfc967a7a649c18e2fce4ed", + Alias: "OH", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.13.1", + Elasticsearch: 
&elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Region: ec.String("aws-eu-central-1"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + TrustAccount: elasticsearchv2.ElasticsearchTrustAccounts{ + { + AccountId: ec.String("ANID"), + TrustAll: ec.Bool(true), + }, + { + AccountId: ec.String("anotherID"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{"abc", "dfg", "hij"}, + }, + }, + TrustExternal: elasticsearchv2.ElasticsearchTrustExternals{ + { + RelationshipId: ec.String("external_id"), + TrustAll: ec.Bool(true), + }, + { + RelationshipId: ec.String("another_external_id"), + TrustAll: ec.Bool(false), + TrustAllowlist: []string{"abc", "dfg"}, + }, + }, + }, + }, + }, + + { + name: "flattens an aws plan with topology.config set", + args: args{ + res: &models.DeploymentGetResponse{ + ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), + Alias: "OH", + Name: ec.String("up2d"), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + RefID: ec.String("main-elasticsearch"), + Region: ec.String("aws-eu-central-1"), + Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("running"), + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.13.1", + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Size: &models.TopologySize{ + Value: ec.Int32(4096), + Resource: ec.String("memory"), + }, + 
Elasticsearch: &models.ElasticsearchConfiguration{ + UserSettingsYaml: "a.setting: true", + }, + }}, + }, + }, + }, + Settings: &models.ElasticsearchClusterSettings{}, + }, + }}, + }, + }, + }, + want: Deployment{ + Id: "123b7b540dfc967a7a649c18e2fce4ed", + Alias: "OH", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.13.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Region: ec.String("aws-eu-central-1"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + }, + + { + name: "flattens an plan with config.docker_image set", + args: args{ + res: &models.DeploymentGetResponse{ + ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), + Alias: "OH", + Name: ec.String("up2d"), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + RefID: ec.String("main-elasticsearch"), + Region: ec.String("aws-eu-central-1"), + Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("running"), + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.14.1", + DockerImage: "docker.elastic.com/elasticsearch/cloud:7.14.1-hash", + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Size: &models.TopologySize{ + Value: ec.Int32(4096), + Resource: ec.String("memory"), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + UserSettingsYaml: 
"a.setting: true", + }, + ZoneCount: 1, + }}, + }, + }, + }, + Settings: &models.ElasticsearchClusterSettings{}, + }, + }}, + Apm: []*models.ApmResourceInfo{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + RefID: ec.String("main-apm"), + Region: ec.String("aws-eu-central-1"), + Info: &models.ApmInfo{ + Status: ec.String("running"), + PlanInfo: &models.ApmPlansInfo{Current: &models.ApmPlanInfo{ + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + Version: "7.14.1", + DockerImage: "docker.elastic.com/apm/cloud:7.14.1-hash", + SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(false), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + ZoneCount: 1, + }}, + }, + }}, + }, + }}, + Kibana: []*models.KibanaResourceInfo{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + RefID: ec.String("main-kibana"), + Region: ec.String("aws-eu-central-1"), + Info: &models.KibanaClusterInfo{ + Status: ec.String("running"), + PlanInfo: &models.KibanaClusterPlansInfo{Current: &models.KibanaClusterPlanInfo{ + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{ + Version: "7.14.1", + DockerImage: "docker.elastic.com/kibana/cloud:7.14.1-hash", + }, + ClusterTopology: []*models.KibanaClusterTopologyElement{{ + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + ZoneCount: 1, + }}, + }, + }}, + }, + }}, + EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + RefID: ec.String("main-enterprise_search"), + Region: ec.String("aws-eu-central-1"), + Info: &models.EnterpriseSearchInfo{ + Status: ec.String("running"), + PlanInfo: &models.EnterpriseSearchPlansInfo{Current: &models.EnterpriseSearchPlanInfo{ + Plan: 
&models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{ + Version: "7.14.1", + DockerImage: "docker.elastic.com/enterprise_search/cloud:7.14.1-hash", + }, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + ZoneCount: 2, + }}, + }, + }}, + }, + }}, + }, + }, + }, + want: Deployment{ + Id: "123b7b540dfc967a7a649c18e2fce4ed", + Alias: "OH", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.14.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Region: ec.String("aws-eu-central-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + DockerImage: ec.String("docker.elastic.com/elasticsearch/cloud:7.14.1-hash"), + }, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + Region: ec.String("aws-eu-central-1"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &kibanav2.KibanaConfig{ + DockerImage: ec.String("docker.elastic.com/kibana/cloud:7.14.1-hash"), + }, + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + RefId: ec.String("main-apm"), + Region: ec.String("aws-eu-central-1"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &apmv2.ApmConfig{ + DockerImage: 
ec.String("docker.elastic.com/apm/cloud:7.14.1-hash"), + DebugEnabled: ec.Bool(false), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + Region: ec.String("aws-eu-central-1"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Config: &enterprisesearchv2.EnterpriseSearchConfig{ + DockerImage: ec.String("docker.elastic.com/enterprise_search/cloud:7.14.1-hash"), + }, + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + NodeTypeAppserver: ec.Bool(true), + NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + }, + }, + }, + + { + name: "flattens an aws plan (io-optimized) with tags", + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-aws-io-optimized-tags.json")}, + want: Deployment{ + Id: "123365f2805e46808d40849b1c0b266b", + Alias: "my-deployment", + Name: "up2d", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "aws-eu-central-1", + Version: "7.9.2", + Tags: map[string]string{ + "aaa": "bbb", + "cost": "rnd", + "owner": "elastic", + }, + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("1239f7ee7196439ba2d105319ac5eba7"), + Region: ec.String("aws-eu-central-1"), + Autoscale: ec.Bool(false), + CloudID: ec.String("up2d:someCloudID"), + HttpEndpoint: ec.String("http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + 
InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("8g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("123dcfda06254ca789eb287e8b73ff4c"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("12328579b3bf40c8b58c1a0ed5a4bd8b"), + Region: ec.String("aws-eu-central-1"), + HttpEndpoint: ec.String("http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80"), + HttpsEndpoint: ec.String("https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443"), + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens a gcp plan (io-optimized)", + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-gcp-io-optimized.json")}, + want: Deployment{ + Id: "1239e402d6df471ea374bd68e3f91cc5", + Alias: "my-deployment", + Name: "up2d", + DeploymentTemplateId: "gcp-io-optimized", + Region: "gcp-asia-east1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: 
ec.String("123695e76d914005bf90b717e668ad4b"), + Region: ec.String("gcp-asia-east1"), + Autoscale: ec.Bool(false), + CloudID: ec.String("up2d:someCloudID"), + HttpEndpoint: ec.String("http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highio.1"), + Size: ec.String("8g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("12365046781e4d729a07df64fe67c8c6"), + Region: ec.String("gcp-asia-east1"), + HttpEndpoint: ec.String("http://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9243"), + InstanceConfigurationId: ec.String("gcp.kibana.1"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("12307c6c304949b8a9f3682b80900879"), + Region: ec.String("gcp-asia-east1"), + HttpEndpoint: ec.String("http://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:80"), + HttpsEndpoint: ec.String("https://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:443"), + InstanceConfigurationId: 
ec.String("gcp.apm.1"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens a gcp plan with autoscale set (io-optimized)", + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-gcp-io-optimized-autoscale.json")}, + want: Deployment{ + Id: "1239e402d6df471ea374bd68e3f91cc5", + Alias: "", + Name: "up2d", + DeploymentTemplateId: "gcp-io-optimized", + Region: "gcp-asia-east1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("123695e76d914005bf90b717e668ad4b"), + Region: ec.String("gcp-asia-east1"), + Autoscale: ec.Bool(true), + CloudID: ec.String("up2d:someCloudID"), + HttpEndpoint: ec.String("http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.coordinating.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("false"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("false"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highio.1"), + Size: ec.String("8g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: 
ec.String("29g"), + MaxSizeResource: ec.String("memory"), + PolicyOverrideJson: ec.String(`{"proactive_storage":{"forecast_window":"3 h"}}`), + }, + }), + "master": *elasticsearchv2.CreateTierForTest("master", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.master.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("false"), + NodeTypeIngest: ec.String("false"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 3, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "ml": *elasticsearchv2.CreateTierForTest("ml", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.ml.1"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("false"), + NodeTypeIngest: ec.String("false"), + NodeTypeMaster: ec.String("false"), + NodeTypeMl: ec.String("true"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("30g"), + MaxSizeResource: ec.String("memory"), + MinSize: ec.String("1g"), + MinSizeResource: ec.String("memory"), + }, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("12365046781e4d729a07df64fe67c8c6"), + Region: ec.String("gcp-asia-east1"), + HttpEndpoint: ec.String("http://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9200"), + HttpsEndpoint: ec.String("https://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9243"), + InstanceConfigurationId: ec.String("gcp.kibana.1"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("12307c6c304949b8a9f3682b80900879"), + Region: ec.String("gcp-asia-east1"), + HttpEndpoint: 
ec.String("http://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:80"), + HttpsEndpoint: ec.String("https://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:443"), + InstanceConfigurationId: ec.String("gcp.apm.1"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens a gcp plan (hot-warm)", + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-gcp-hot-warm.json")}, + want: Deployment{ + Id: "123d148423864552aa57b59929d4bf4d", + Name: "up2d-hot-warm", + DeploymentTemplateId: "gcp-hot-warm", + Region: "gcp-us-central1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("123e837db6ee4391bb74887be35a7a91"), + Region: ec.String("gcp-us-central1"), + Autoscale: ec.Bool(false), + CloudID: ec.String("up2d-hot-warm:someCloudID"), + HttpEndpoint: ec.String("http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.coordinating.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("false"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("false"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highio.1"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + 
NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highstorage.1"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("false"), + NodeTypeMl: ec.String("false"), + ZoneCount: 2, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("12372cc60d284e7e96b95ad14727c23d"), + Region: ec.String("gcp-us-central1"), + HttpEndpoint: ec.String("http://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9243"), + InstanceConfigurationId: ec.String("gcp.kibana.1"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("1234b68b0b9347f1b49b1e01b33bf4a4"), + Region: ec.String("gcp-us-central1"), + HttpEndpoint: ec.String("http://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:80"), + HttpsEndpoint: ec.String("https://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:443"), + InstanceConfigurationId: ec.String("gcp.apm.1"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens a gcp plan (hot-warm) with node_roles", + args: args{res: deploymentGetResponseFromFile(t, "../../testdata/deployment-gcp-hot-warm-node_roles.json")}, + want: Deployment{ + 
Id: "123d148423864552aa57b59929d4bf4d", + Name: "up2d-hot-warm", + DeploymentTemplateId: "gcp-hot-warm", + Region: "gcp-us-central1", + Version: "7.11.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("123e837db6ee4391bb74887be35a7a91"), + Region: ec.String("gcp-us-central1"), + Autoscale: ec.Bool(false), + CloudID: ec.String("up2d-hot-warm:someCloudID"), + HttpEndpoint: ec.String("http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.coordinating.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + NodeRoles: []string{"ingest", "remote_cluster_client"}, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highio.1"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "master": *elasticsearchv2.CreateTierForTest("master", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.master.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 3, + NodeRoles: []string{"master"}, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "ml": *elasticsearchv2.CreateTierForTest("ml", elasticsearchv2.ElasticsearchTopology{ + 
InstanceConfigurationId: ec.String("gcp.ml.1"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeRoles: []string{"ml", "remote_cluster_client"}, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("gcp.data.highstorage.1"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String("12372cc60d284e7e96b95ad14727c23d"), + Region: ec.String("gcp-us-central1"), + HttpEndpoint: ec.String("http://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9200"), + HttpsEndpoint: ec.String("https://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9243"), + InstanceConfigurationId: ec.String("gcp.kibana.1"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String("1234b68b0b9347f1b49b1e01b33bf4a4"), + Region: ec.String("gcp-us-central1"), + HttpEndpoint: ec.String("http://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:80"), + HttpsEndpoint: ec.String("https://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:443"), + InstanceConfigurationId: ec.String("gcp.apm.1"), + Size: ec.String("0.5g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + + { + name: "flattens an aws plan (Cross Cluster Search)", + args: args{ + res: deploymentGetResponseFromFile(t, "../../testdata/deployment-aws-ccs.json"), + remotes: models.RemoteResources{Resources: 
[]*models.RemoteResourceRef{ + { + Alias: ec.String("alias"), + DeploymentID: ec.String("someid"), + ElasticsearchRefID: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(true), + }, + { + DeploymentID: ec.String("some other id"), + ElasticsearchRefID: ec.String("main-elasticsearch"), + }, + }}, + }, + want: Deployment{ + Id: "123987dee8d54505974295e07fc7d13e", + Name: "ccs", + DeploymentTemplateId: "aws-cross-cluster-search-v2", + Region: "eu-west-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String("1230b3ae633b4f51a432d50971f7f1c1"), + Region: ec.String("eu-west-1"), + Autoscale: ec.Bool(false), + CloudID: ec.String("ccs:someCloudID"), + HttpEndpoint: ec.String("http://1230b3ae633b4f51a432d50971f7f1c1.eu-west-1.aws.found.io:9200"), + HttpsEndpoint: ec.String("https://1230b3ae633b4f51a432d50971f7f1c1.eu-west-1.aws.found.io:9243"), + Config: &elasticsearchv2.ElasticsearchConfig{}, + RemoteCluster: elasticsearchv2.ElasticsearchRemoteClusters{ + { + Alias: ec.String("alias"), + DeploymentId: ec.String("someid"), + RefId: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(true), + }, + { + DeploymentId: ec.String("some other id"), + RefId: ec.String("main-elasticsearch"), + }, + }, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.ccs.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: 
ec.String("12317425e9e14491b74ee043db3402eb"), + Region: ec.String("eu-west-1"), + HttpEndpoint: ec.String("http://12317425e9e14491b74ee043db3402eb.eu-west-1.aws.found.io:9200"), + HttpsEndpoint: ec.String("https://12317425e9e14491b74ee043db3402eb.eu-west-1.aws.found.io:9243"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + dep, err := ReadDeployment(tt.args.res, &tt.args.remotes, nil) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + assert.NotNil(t, dep) + assert.Equal(t, tt.want, *dep) + } + }) + } +} + +func Test_getDeploymentTemplateID(t *testing.T) { + type args struct { + res *models.DeploymentResources + } + tests := []struct { + name string + args args + want string + err error + }{ + { + name: "empty resources returns an error", + args: args{res: &models.DeploymentResources{}}, + err: errors.New("failed to obtain the deployment template id"), + }, + { + name: "single empty current plan returns error", + args: args{res: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Info: &models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Pending: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized"), + }, + }, + }, + }, + }, + }, + }, + }}, + err: errors.New("failed to obtain the deployment template id"), + }, + { + name: "multiple deployment templates returns an error", + args: args{res: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Info: &models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: 
&models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("someid"), + }, + }, + }, + }, + }, + }, + { + Info: &models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("someotherid"), + }, + }, + }, + }, + }, + }, + }, + }}, + err: errors.New("there are more than 1 deployment templates specified on the deployment: \"someid, someotherid\""), + }, + { + name: "single deployment template returns it", + args: args{res: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + { + Info: &models.ElasticsearchClusterInfo{ + PlanInfo: &models.ElasticsearchClusterPlansInfo{ + Current: &models.ElasticsearchClusterPlanInfo{ + Plan: &models.ElasticsearchClusterPlan{ + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized"), + }, + }, + }, + }, + }, + }, + }, + }}, + want: "aws-io-optimized", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := getDeploymentTemplateID(tt.args.res) + if tt.err != nil { + assert.EqualError(t, err, tt.err.Error()) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_test_utils.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_test_utils.go new file mode 100644 index 000000000..74e0dc03d --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_test_utils.go @@ -0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "encoding/json" + "io" + "os" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/models" + + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" +) + +func elasticsearchPayloadFromReader(t *testing.T, rc io.Reader, useNodeRoles bool) *models.ElasticsearchPayload { + t.Helper() + + var tpl models.DeploymentTemplateInfoV2 + if err := json.NewDecoder(rc).Decode(&tpl); err != nil { + t.Fatal(err) + } + + return elasticsearchv2.EnrichElasticsearchTemplate( + tpl.DeploymentTemplate.Resources.Elasticsearch[0], + *tpl.ID, + "", + useNodeRoles, + ) +} + +func deploymentGetResponseFromFile(t *testing.T, filename string) *models.DeploymentGetResponse { + t.Helper() + f, err := os.Open(filename) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + var res models.DeploymentGetResponse + if err := json.NewDecoder(f).Decode(&res); err != nil { + t.Fatal(err) + } + return &res +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go new file mode 100644 index 000000000..6c7695960 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload.go @@ -0,0 +1,157 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deptemplateapi" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +func (plan DeploymentTF) UpdateRequest(ctx context.Context, client *api.API, state DeploymentTF) (*models.DeploymentUpdateRequest, diag.Diagnostics) { + var result = models.DeploymentUpdateRequest{ + Name: plan.Name.Value, + Alias: plan.Alias.Value, + 
PruneOrphans: ec.Bool(true), + Resources: &models.DeploymentUpdateResources{}, + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{}, + } + + dtID := plan.DeploymentTemplateId.Value + + var diagsnostics diag.Diagnostics + + template, err := deptemplateapi.Get(deptemplateapi.GetParams{ + API: client, + TemplateID: dtID, + Region: plan.Region.Value, + HideInstanceConfigurations: true, + }) + if err != nil { + diagsnostics.AddError("Deployment template get error", err.Error()) + return nil, diagsnostics + } + + // When the deployment template is changed, we need to skip the missing + // resource topologies to account for a new instance_configuration_id and + // a different default value. + skipEStopologies := plan.DeploymentTemplateId.Value != "" && plan.DeploymentTemplateId.Value != state.DeploymentTemplateId.Value && state.DeploymentTemplateId.Value != "" + // If the deployment_template_id is changed, then we skip updating the + // Elasticsearch topology to account for the case where the + // instance_configuration_id changes, i.e. Hot / Warm, etc. + // This might not be necessary going forward as we move to + // tiered Elasticsearch nodes. + + useNodeRoles, diags := elasticsearchv2.UseNodeRoles(ctx, state.Version, plan.Version, plan.Elasticsearch) + + if diags.HasError() { + return nil, diags + } + + elasticsearchPayload, diags := elasticsearchv2.ElasticsearchPayload(ctx, plan.Elasticsearch, template, dtID, plan.Version.Value, useNodeRoles, skipEStopologies) + + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if elasticsearchPayload != nil { + // if the restore snapshot operation has been specified, the snapshot restore + // can't be full once the cluster has been created, so the Strategy must be set + // to "partial". 
+ ensurePartialSnapshotStrategy(elasticsearchPayload) + + result.Resources.Elasticsearch = append(result.Resources.Elasticsearch, elasticsearchPayload) + } + + kibanaPayload, diags := kibanav2.KibanaPayload(ctx, plan.Kibana, template) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if kibanaPayload != nil { + result.Resources.Kibana = append(result.Resources.Kibana, kibanaPayload) + } + + apmPayload, diags := apmv2.ApmPayload(ctx, plan.Apm, template) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if apmPayload != nil { + result.Resources.Apm = append(result.Resources.Apm, apmPayload) + } + + integrationsServerPayload, diags := integrationsserverv2.IntegrationsServerPayload(ctx, plan.IntegrationsServer, template) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if integrationsServerPayload != nil { + result.Resources.IntegrationsServer = append(result.Resources.IntegrationsServer, integrationsServerPayload) + } + + enterpriseSearchPayload, diags := enterprisesearchv2.EnterpriseSearchesPayload(ctx, plan.EnterpriseSearch, template) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + if enterpriseSearchPayload != nil { + result.Resources.EnterpriseSearch = append(result.Resources.EnterpriseSearch, enterpriseSearchPayload) + } + + observabilityPayload, diags := observabilityv2.ObservabilityPayload(ctx, plan.Observability, client) + if diags.HasError() { + diagsnostics.Append(diags...) + } + result.Settings.Observability = observabilityPayload + + // In order to stop shipping logs and metrics, an empty Observability + // object must be passed, as opposed to a nil object when creating a + // deployment without observability settings. 
+ if plan.Observability.IsNull() && !state.Observability.IsNull() { + result.Settings.Observability = &models.DeploymentObservabilitySettings{} + } + + result.Metadata.Tags, diags = converters.TypesMapToModelsTags(ctx, plan.Tags) + if diags.HasError() { + diagsnostics.Append(diags...) + } + + return &result, diagsnostics +} + +func ensurePartialSnapshotStrategy(es *models.ElasticsearchPayload) { + transient := es.Plan.Transient + if transient == nil || transient.RestoreSnapshot == nil { + return + } + transient.RestoreSnapshot.Strategy = "partial" +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go new file mode 100644 index 000000000..9d0e67053 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/deployment_update_payload_test.go @@ -0,0 +1,2141 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "io" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/stretchr/testify/assert" + + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" +) + +func Test_updateResourceToModel(t *testing.T) { + defaultHotTier := elasticsearchv2.CreateTierForTest( + "hot_content", + elasticsearchv2.ElasticsearchTopology{ + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }, + ) + + defaultElasticsearch := &elasticsearchv2.Elasticsearch{ + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *defaultHotTier, + }, + } + + var ioOptimizedTpl = func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-aws-io-optimized-v2.json") + } + + hotWarmTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-aws-hot-warm-v2.json") + } + + ccsTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-aws-cross-cluster-search-v2.json") + } + + emptyTpl := func() io.ReadCloser { + return fileAsResponseBody(t, "../../testdata/template-empty.json") + } + + type args struct { + plan Deployment + state *Deployment + client *api.API + } + tests := []struct { + name string + args args + want 
*models.DeploymentUpdateRequest + diags diag.Diagnostics + }{ + { + name: "parses the resources", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &apmv2.ApmConfig{ + DebugEnabled: ec.Bool(false), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + ZoneCount: 1, + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + 
ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + }, + Observability: &observabilityv2.Observability{ + DeploymentId: ec.String(mock.ValidClusterID), + RefId: ec.String("main-elasticsearch"), + Logs: true, + Metrics: true, + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock( + mock.New200Response(ioOptimizedTpl()), + mock.New200Response( + mock.NewStructBody(models.DeploymentGetResponse{ + Healthy: ec.Bool(true), + ID: ec.String(mock.ValidClusterID), + Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{{ + ID: ec.String(mock.ValidClusterID), + RefID: ec.String("main-elasticsearch"), + }}, + }, + }), + ), + ), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{ + Observability: &models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + Metrics: &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: 
ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 1, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: 
ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{ + SystemSettings: &models.ApmSystemSettings{ + DebugEnabled: ec.Bool(false), + }, + }, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + Apm: &apmv2.Apm{}, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{}, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: 
&models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: 
ec.String("apm-ref-id"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("enterprise_search-ref-id"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with topology overrides", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("4g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: ec.String("2g"), + }, + Apm: &apmv2.Apm{ + RefId: ec.String("main-apm"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: ec.String("1g"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + RefId: 
ec.String("main-enterprise_search"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + Size: ec.String("4g"), + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.7.0", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: 
ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }, + }, + }, + }, + }, + Apm: []*models.ApmPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }}, + }, + }, + }, + EnterpriseSearch: []*models.EnterpriseSearchPayload{ + { + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "parses the resources with empty declarations (Hot Warm)", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-hot-warm-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + }, + client: api.NewMock(mock.New200Response(hotWarmTpl())), + }, + want: &models.DeploymentUpdateRequest{ + 
Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, hotWarmTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("es-ref-id"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + Curation: nil, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.9.2", + Curation: nil, + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-hot-warm-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d2", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(false), + }, + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: 
map[string]string{"data": "warm"}, + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{ + { + ElasticsearchClusterRefID: ec.String("es-ref-id"), + Region: ec.String("us-east-1"), + RefID: ec.String("kibana-ref-id"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, + }, + }, + }, + }, + + { + name: "toplogy change from hot / warm to cross cluster search", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-cross-cluster-search-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *defaultHotTier, + }, + }, + Kibana: &kibanav2.Kibana{ + RefId: ec.String("main-kibana"), + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Alias: "my-deployment", + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &elasticsearchv2.ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: 
ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + ZoneCount: 1, + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + Config: &apmv2.ApmConfig{ + DebugEnabled: ec.Bool(false), + }, + InstanceConfigurationId: ec.String("aws.apm.r5d"), + Size: ec.String("0.5g"), + ZoneCount: 1, + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("us-east-1"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + }, + Observability: &observabilityv2.Observability{ + DeploymentId: ec.String(mock.ValidClusterID), + RefId: ec.String("main-elasticsearch"), + Logs: true, + Metrics: true, + }, + TrafficFilter: []string{"0.0.0.0/0", "192.168.10.0/24"}, + }, + client: 
api.NewMock(mock.New200Response(ccsTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + Alias: "my-deployment", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{ + Observability: &models.DeploymentObservabilitySettings{}, + }, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ccsTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{}, + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.9.2", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-cross-cluster-search-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + ZoneCount: 1, + InstanceConfigurationID: "aws.ccs.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }}, + }, + }, + }, + + // The 
behavior of this change should be: + // * Resets the Elasticsearch topology: from 16g (due to unsetTopology call on DT change). + // * Keeps the kibana toplogy size to 2g even though the topology element has been removed (saved value persists). + // * Removes all other non present resources + { + name: "topology change with sizes not default from io optimized to cross cluster search", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-cross-cluster-search-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *defaultHotTier, + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + Size: ec.String("2g"), + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + Size: ec.String("1g"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + 
ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + Size: ec.String("2g"), + }, + }, + client: api.NewMock(mock.New200Response(ccsTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ccsTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{}, + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.9.2", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-cross-cluster-search-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + ZoneCount: 1, + InstanceConfigurationID: "aws.ccs.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + // This field's value is reset. 
+ Value: ec.Int32(1024), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }}, + }, + })}, + Kibana: []*models.KibanaPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }}, + }, + }, + }, + + // The behavior of this change should be: + // * Keeps all topology sizes as they were defined (saved value persists). + { + name: "topology change with sizes not default from explicit value to empty", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *defaultHotTier, + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.9.2", + 
Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + Size: ec.String("2g"), + }, + Apm: &apmv2.Apm{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + Size: ec.String("1g"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + Size: ec.String("8g"), + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.9.2", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: 
ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + Kibana: []*models.KibanaPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }}, + Apm: []*models.ApmPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-apm"), + Plan: &models.ApmPlan{ + Apm: &models.ApmConfiguration{}, + ClusterTopology: []*models.ApmTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.apm.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(512), + }, + }}, + }, + }}, + EnterpriseSearch: []*models.EnterpriseSearchPayload{{ + ElasticsearchClusterRefID: ec.String("main-elasticsearch"), + Region: ec.String("us-east-1"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: 
&models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }}, + }, + }, + }, + + { + name: "does not migrate node_type to node_role on version upgrade that's lower than 7.10.0", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.11.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.9.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: 
&models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.11.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(16384), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + { + name: "does not migrate node_type to node_role on version upgrade that's higher than 7.10.0", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.11.1", + Elasticsearch: 
&elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + }), + }, + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.11.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + 
ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(16384), + }, + NodeType: &models.ElasticsearchNodeType{ + Data: ec.Bool(true), + Ingest: ec.Bool(true), + Master: ec.Bool(true), + Ml: ec.Bool(false), + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "migrates node_type to node_role when the existing topology element size is updated", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("32g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", 
elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(32768), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + 
}}, + }, + })}, + }, + }, + }, + + { + name: "migrates node_type to node_role when the existing topology element size is updated and adds warm tier", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: 
[]*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(16384), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "warm"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: 
ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "enables autoscaling with the default policies", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Autoscale: ec.Bool(true), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "warm": *elasticsearchv2.CreateTierForTest("warm", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.12.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Autoscale: ec.Bool(true), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), 
&models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(true), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.12.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ + { + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(16384), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + { + ID: "warm", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "warm"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highstorage.d3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "data_warm", + "remote_cluster_client", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(0), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }, + }, + }, + })}, + }, + }, + }, + + { + name: "parses the resources with tags", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: 
"my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Tags: map[string]string{ + "aaa": "bbb", + "owner": "elastic", + "cost-center": "rnd", + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{Tags: []*models.MetadataItem{ + {Key: ec.String("aaa"), Value: ec.String("bbb")}, + {Key: ec.String("cost-center"), Value: ec.String("rnd")}, + {Key: ec.String("owner"), Value: ec.String("elastic")}, + }}, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: 
&models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + { + name: "handles a snapshot_source block adding Strategy: partial", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + SnapshotSource: &elasticsearchv2.ElasticsearchSnapshotSource{ + SourceElasticsearchClusterId: "8c63b87af9e24ea49b8a4bfe550e5fe9", + }, + }, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: 
&elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{ + RestoreSnapshot: &models.RestoreSnapshotConfiguration{ + SourceClusterID: "8c63b87af9e24ea49b8a4bfe550e5fe9", + SnapshotName: ec.String(""), + Strategy: "partial", + }, + }, + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: &models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", 
+ "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + { + name: "handles empty Elasticsearch empty config block", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.10.1", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("8g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + client: api.NewMock(mock.New200Response(ioOptimizedTpl())), + }, + want: &models.DeploymentUpdateRequest{ + Name: "my_deployment_name", + PruneOrphans: ec.Bool(true), + Settings: &models.DeploymentUpdateSettings{}, + Metadata: &models.DeploymentUpdateMetadata{ + Tags: []*models.MetadataItem{}, + }, + Resources: &models.DeploymentUpdateResources{ + Elasticsearch: []*models.ElasticsearchPayload{elasticsearchv2.EnrichWithEmptyTopologies(elasticsearchPayloadFromReader(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ + Region: ec.String("us-east-1"), + RefID: ec.String("main-elasticsearch"), + Settings: &models.ElasticsearchClusterSettings{ + DedicatedMastersThreshold: 6, + }, + Plan: &models.ElasticsearchClusterPlan{ + AutoscalingEnabled: ec.Bool(false), + Elasticsearch: &models.ElasticsearchConfiguration{ + Version: "7.10.1", + }, + DeploymentTemplate: &models.DeploymentTemplateReference{ + ID: ec.String("aws-io-optimized-v2"), + }, + ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ + ID: "hot_content", + Elasticsearch: 
&models.ElasticsearchConfiguration{ + NodeAttributes: map[string]string{"data": "hot"}, + }, + ZoneCount: 2, + InstanceConfigurationID: "aws.data.highio.i3", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(8192), + }, + NodeRoles: []string{ + "master", + "ingest", + "remote_cluster_client", + "data_hot", + "transform", + "data_content", + }, + TopologyElementControl: &models.TopologyElementControl{ + Min: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + AutoscalingMax: &models.TopologySize{ + Value: ec.Int32(118784), + Resource: ec.String("memory"), + }, + }}, + }, + })}, + }, + }, + }, + + { + name: "topology change with invalid resources returns an error", + args: args{ + plan: Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "empty-deployment-template", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: defaultElasticsearch, + Kibana: &kibanav2.Kibana{}, + Apm: &apmv2.Apm{}, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{}, + }, + state: &Deployment{ + Id: mock.ValidClusterID, + Name: "my_deployment_name", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.9.2", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + Topology: elasticsearchv2.ElasticsearchTopologies{ + "hot_content": *elasticsearchv2.CreateTierForTest("hot_content", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + "coordinating": *elasticsearchv2.CreateTierForTest("coordinating", elasticsearchv2.ElasticsearchTopology{ + Size: ec.String("16g"), + Autoscaling: &elasticsearchv2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + Kibana: &kibanav2.Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + Size: ec.String("2g"), + }, + Apm: &apmv2.Apm{ + 
ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-apm"), + Size: ec.String("1g"), + }, + EnterpriseSearch: &enterprisesearchv2.EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + Size: ec.String("8g"), + }, + }, + client: api.NewMock(mock.New200Response(emptyTpl())), + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("kibana payload error", "kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana") + diags.AddError("apm payload error", "apm specified but deployment template is not configured for it. Use a different template if you wish to add apm") + diags.AddError("enterprise_search payload error", "enterprise_search specified but deployment template is not configured for it. Use a different template if you wish to add enterprise_search") + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schema := DeploymentSchema() + + var plan DeploymentTF + diags := tfsdk.ValueFrom(context.Background(), &tt.args.plan, schema.Type(), &plan) + assert.Nil(t, diags) + + state := tt.args.state + if state == nil { + state = &tt.args.plan + } + + var stateTF DeploymentTF + + diags = tfsdk.ValueFrom(context.Background(), state, schema.Type(), &stateTF) + assert.Nil(t, diags) + + got, diags := plan.UpdateRequest(context.Background(), tt.args.client, stateTF) + if tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.NotNil(t, got) + assert.Equal(t, *tt.want, *got) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/elasticsearch_remote_cluster_payload_test.go b/ec/ecresource/deploymentresource/deployment/v2/elasticsearch_remote_cluster_payload_test.go new file mode 100644 index 000000000..2536572db --- /dev/null +++ 
b/ec/ecresource/deploymentresource/deployment/v2/elasticsearch_remote_cluster_payload_test.go @@ -0,0 +1,180 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" +) + +func Test_handleRemoteClusters(t *testing.T) { + type args struct { + plan Deployment + client *api.API + } + tests := []struct { + name string + args args + }{ + { + name: "returns when the resource has no remote clusters", + args: args{ + plan: Deployment{ + Id: "320b7b540dfc967a7a649c18e2fce4ed", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + }, + }, + client: api.NewMock(mock.New202ResponseAssertion( + &mock.RequestAssertion{ + Header: api.DefaultWriteMockHeaders, + Host: api.DefaultMockHost, + Path: 
`/api/v1/deployments/320b7b540dfc967a7a649c18e2fce4ed/elasticsearch/main-elasticsearch/remote-clusters`, + Method: "PUT", + Body: mock.NewStringBody(`{"resources":[]}` + "\n"), + }, + mock.NewStringBody("{}"), + )), + }, + }, + { + name: "read the remote clusters", + args: args{ + client: api.NewMock(mock.New202ResponseAssertion( + &mock.RequestAssertion{ + Header: api.DefaultWriteMockHeaders, + Host: api.DefaultMockHost, + Path: `/api/v1/deployments/320b7b540dfc967a7a649c18e2fce4ed/elasticsearch/main-elasticsearch/remote-clusters`, + Method: "PUT", + Body: mock.NewStringBody(`{"resources":[{"alias":"alias","deployment_id":"someid","elasticsearch_ref_id":"main-elasticsearch","skip_unavailable":true},{"alias":"alias","deployment_id":"some other id","elasticsearch_ref_id":"main-elasticsearch","skip_unavailable":false}]}` + "\n"), + }, + mock.NewStringBody("{}"), + )), + plan: Deployment{ + Name: "my_deployment_name", + Id: "320b7b540dfc967a7a649c18e2fce4ed", + DeploymentTemplateId: "aws-io-optimized-v2", + Region: "us-east-1", + Version: "7.7.0", + Elasticsearch: &elasticsearchv2.Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + RemoteCluster: elasticsearchv2.ElasticsearchRemoteClusters{ + { + Alias: ec.String("alias"), + DeploymentId: ec.String("someid"), + RefId: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(true), + }, + { + Alias: ec.String("alias"), + DeploymentId: ec.String("some other id"), + RefId: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(false), + }, + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schema := DeploymentSchema() + + var planTF DeploymentTF + + diags := tfsdk.ValueFrom(context.Background(), tt.args.plan, schema.Type(), &planTF) + assert.Nil(t, diags) + + diags = HandleRemoteClusters(context.Background(), tt.args.client, planTF.Id.Value, planTF.Elasticsearch) + assert.Nil(t, diags) + }) + } +} + +func Test_writeRemoteClusters(t *testing.T) { + type args 
struct { + remoteClusters elasticsearchv2.ElasticsearchRemoteClusters + } + tests := []struct { + name string + args args + want *models.RemoteResources + }{ + { + name: "wants no error or empty res", + args: args{ + remoteClusters: elasticsearchv2.ElasticsearchRemoteClusters{}, + }, + want: &models.RemoteResources{Resources: []*models.RemoteResourceRef{}}, + }, + { + name: "expands remotes", + args: args{ + remoteClusters: elasticsearchv2.ElasticsearchRemoteClusters{ + { + Alias: ec.String("alias"), + DeploymentId: ec.String("someid"), + RefId: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(true), + }, + { + Alias: ec.String("alias"), + DeploymentId: ec.String("some other id"), + RefId: ec.String("main-elasticsearch"), + }, + }, + }, + want: &models.RemoteResources{Resources: []*models.RemoteResourceRef{ + { + Alias: ec.String("alias"), + DeploymentID: ec.String("someid"), + ElasticsearchRefID: ec.String("main-elasticsearch"), + SkipUnavailable: ec.Bool(true), + }, + { + Alias: ec.String("alias"), + DeploymentID: ec.String("some other id"), + ElasticsearchRefID: ec.String("main-elasticsearch"), + }, + }}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var remoteClustersTF types.Set + diags := tfsdk.ValueFrom(context.Background(), tt.args.remoteClusters, elasticsearchv2.ElasticsearchRemoteClusterSchema().FrameworkType(), &remoteClustersTF) + assert.Nil(t, diags) + + got, diags := elasticsearchv2.ElasticsearchRemoteClustersPayload(context.Background(), remoteClustersTF) + assert.Nil(t, diags) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/partial_stapshot_strategy_test.go b/ec/ecresource/deploymentresource/deployment/v2/partial_stapshot_strategy_test.go new file mode 100644 index 000000000..3b4f63386 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/partial_stapshot_strategy_test.go @@ -0,0 +1,87 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/stretchr/testify/assert" +) + +func Test_ensurePartialSnapshotStrategy(t *testing.T) { + type args struct { + es *models.ElasticsearchPayload + } + tests := []struct { + name string + args args + want *models.ElasticsearchPayload + }{ + { + name: "ignores resources with no transient block", + args: args{es: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{}, + }}, + want: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{}, + }, + }, + { + name: "ignores resources with no transient.snapshot block", + args: args{es: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{}, + }, + }}, + want: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{}, + }, + }, + }, + { + name: "Sets strategy to partial", + args: args{es: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{ + RestoreSnapshot: &models.RestoreSnapshotConfiguration{ + 
SourceClusterID: "some", + }, + }, + }, + }}, + want: &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{ + Transient: &models.TransientElasticsearchPlanConfiguration{ + RestoreSnapshot: &models.RestoreSnapshotConfiguration{ + SourceClusterID: "some", + Strategy: "partial", + }, + }, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + ensurePartialSnapshotStrategy(tt.args.es) + assert.Equal(t, tt.want, tt.args.es) + }) + } +} diff --git a/ec/ecresource/deploymentresource/deployment/v2/schema.go b/ec/ecresource/deploymentresource/deployment/v2/schema.go new file mode 100644 index 000000000..ce99fa796 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment/v2/schema.go @@ -0,0 +1,129 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + observabilityv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v2" +) + +func DeploymentSchema() tfsdk.Schema { + return tfsdk.Schema{ + Version: 2, + // This description is used by the documentation generator and the language server. + MarkdownDescription: "Elastic Cloud Deployment resource", + + Attributes: map[string]tfsdk.Attribute{ + "id": { + Type: types.StringType, + Computed: true, + MarkdownDescription: "Unique identifier of this deployment.", + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "alias": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "version": { + Type: types.StringType, + Description: "Elastic Stack version to use for all of the deployment resources.", + Required: true, + }, + "region": { + Type: types.StringType, + Description: `Region when the deployment should be hosted. 
For ECE environments this should be set to "ece-region".`, + Required: true, + }, + "deployment_template_id": { + Type: types.StringType, + Description: "Deployment Template identifier to base the deployment from.", + Required: true, + }, + "name": { + Type: types.StringType, + Description: "Name for the deployment", + Optional: true, + }, + "request_id": { + Type: types.StringType, + Description: "request_id to set on the create operation, only used when a previous create attempt returns an error including a request_id.", + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "elasticsearch_username": { + Type: types.StringType, + Description: "Username for authenticating to the Elasticsearch resource.", + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "elasticsearch_password": { + Type: types.StringType, + Description: "Password for authenticating to the Elasticsearch resource.", + Computed: true, + Sensitive: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "apm_secret_token": { + Type: types.StringType, + Computed: true, + Sensitive: true, + }, + "traffic_filter": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + Description: "Optional list of traffic filters to apply to this deployment.", + }, + "tags": { + Description: "Optional map of deployment tags", + Type: types.MapType{ + ElemType: types.StringType, + }, + Optional: true, + }, + "elasticsearch": elasticsearchv2.ElasticsearchSchema(), + "kibana": kibanav2.KibanaSchema(), + "apm": apmv2.ApmSchema(), + "integrations_server": integrationsserverv2.IntegrationsServerSchema(), + "enterprise_search": enterprisesearchv2.EnterpriseSearchSchema(), + "observability": observabilityv2.ObservabilitySchema(), + }, + } +} diff --git a/ec/ecresource/deploymentresource/traffic_filter_test.go 
b/ec/ecresource/deploymentresource/deployment/v2/traffic_filter_test.go similarity index 77% rename from ec/ecresource/deploymentresource/traffic_filter_test.go rename to ec/ecresource/deploymentresource/deployment/v2/traffic_filter_test.go index 36ace0b6a..59bc5fce8 100644 --- a/ec/ecresource/deploymentresource/traffic_filter_test.go +++ b/ec/ecresource/deploymentresource/deployment/v2/traffic_filter_test.go @@ -15,14 +15,17 @@ // specific language governing permissions and limitations // under the License. -package deploymentresource +package v2 import ( + "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/models" ) func TestParseTrafficFiltering(t *testing.T) { @@ -32,7 +35,7 @@ func TestParseTrafficFiltering(t *testing.T) { tests := []struct { name string args args - want []interface{} + want []string }{ { name: "parses no rules when they're empty", @@ -66,7 +69,7 @@ func TestParseTrafficFiltering(t *testing.T) { }, }, }}, - want: []interface{}{ + want: []string{ "one-id-of-a-rule", "another-id-of-another-rule", }, @@ -74,19 +77,17 @@ func TestParseTrafficFiltering(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - var gotSlice []interface{} - if got := flattenTrafficFiltering(tt.args.settings); got != nil { - gotSlice = got.List() - } - assert.Equal(t, tt.want, gotSlice) + got, err := readTrafficFilters(tt.args.settings) + assert.Nil(t, err) + assert.Equal(t, tt.want, got) }) } } -func Test_expandTrafficFilterCreate(t *testing.T) { +func Test_trafficFilterToModel(t *testing.T) { type args struct { - v *schema.Set - req *models.DeploymentCreateRequest + filters []string + req *models.DeploymentCreateRequest } tests := []struct { name string @@ -100,8 +101,8 @@ 
func Test_expandTrafficFilterCreate(t *testing.T) { { name: "parses all the traffic filtering rules", args: args{ - v: schema.NewSet(schema.HashString, []interface{}{"0.0.0.0/0", "192.168.1.0/24"}), - req: &models.DeploymentCreateRequest{}, + filters: []string{"0.0.0.0/0", "192.168.1.0/24"}, + req: &models.DeploymentCreateRequest{}, }, want: &models.DeploymentCreateRequest{Settings: &models.DeploymentCreateSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{Rulesets: []string{ @@ -112,8 +113,8 @@ func Test_expandTrafficFilterCreate(t *testing.T) { { name: "parses all the traffic filtering rules", args: args{ - v: schema.NewSet(schema.HashString, []interface{}{"0.0.0.0/0", "192.168.1.0/24"}), - req: &models.DeploymentCreateRequest{Settings: &models.DeploymentCreateSettings{}}, + filters: []string{"0.0.0.0/0", "192.168.1.0/24"}, + req: &models.DeploymentCreateRequest{Settings: &models.DeploymentCreateSettings{}}, }, want: &models.DeploymentCreateRequest{Settings: &models.DeploymentCreateSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{Rulesets: []string{ @@ -124,7 +125,7 @@ func Test_expandTrafficFilterCreate(t *testing.T) { { name: "parses all the traffic filtering rules", args: args{ - v: schema.NewSet(schema.HashString, []interface{}{"0.0.0.0/0", "192.168.1.0/24"}), + filters: []string{"0.0.0.0/0", "192.168.1.0/24"}, req: &models.DeploymentCreateRequest{Settings: &models.DeploymentCreateSettings{ TrafficFilterSettings: &models.TrafficFilterSettings{ Rulesets: []string{"192.168.0.0/24"}, @@ -140,15 +141,20 @@ func Test_expandTrafficFilterCreate(t *testing.T) { { name: "parses no traffic filtering rules", args: args{ - v: schema.NewSet(schema.HashString, nil), - req: &models.DeploymentCreateRequest{}, + filters: nil, + req: &models.DeploymentCreateRequest{}, }, want: &models.DeploymentCreateRequest{}, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - expandTrafficFilterCreate(tt.args.v, tt.args.req) + var filters 
types.Set + diags := tfsdk.ValueFrom(context.Background(), tt.args.filters, types.SetType{ElemType: types.StringType}, &filters) + assert.Nil(t, diags) + + diags = trafficFilterToModel(context.Background(), filters, tt.args.req) + assert.Nil(t, diags) assert.Equal(t, tt.want, tt.args.req) }) } diff --git a/ec/internal/util/changes_test.go b/ec/ecresource/deploymentresource/deployment_not_found_test.go similarity index 50% rename from ec/internal/util/changes_test.go rename to ec/ecresource/deploymentresource/deployment_not_found_test.go index b78c35c9d..03e493661 100644 --- a/ec/internal/util/changes_test.go +++ b/ec/ecresource/deploymentresource/deployment_not_found_test.go @@ -15,26 +15,20 @@ // specific language governing permissions and limitations // under the License. -package util +package deploymentresource import ( "testing" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/go-openapi/runtime" + + "github.com/elastic/cloud-sdk-go/pkg/api/apierror" + "github.com/elastic/cloud-sdk-go/pkg/client/deployments" ) -func TestObjectRemoved(t *testing.T) { - schemaMap := map[string]*schema.Schema{ - "object": { - Type: schema.TypeList, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - } +func Test_deploymentNotFound(t *testing.T) { type args struct { - d *schema.ResourceData - key string + err error } tests := []struct { name string @@ -42,44 +36,39 @@ func TestObjectRemoved(t *testing.T) { want bool }{ { - name: "removes an object", + name: "When the error is empty, it returns false", + }, + { + name: "When the error is something else (500), it returns false", args: args{ - key: "object", - d: NewResourceData(t, ResDataParams{ - ID: "id", - Schema: schemaMap, - State: map[string]interface{}{ - "object": []interface{}{"a", "b"}, - }, - Change: map[string]interface{}{ - "object": []interface{}{}, - }, - }), + err: &apierror.Error{Err: &runtime.APIError{Code: 500}}, + }, + }, + { + name: "When the error is something else (401), 
it returns false", + args: args{ + err: &apierror.Error{Err: &deployments.GetDeploymentUnauthorized{}}, + }, + }, + { + name: "When the deployment is not found, it returns true", + args: args{ + err: &apierror.Error{Err: &deployments.GetDeploymentNotFound{}}, }, want: true, }, { - name: "does not remove an object", + name: "When the deployment is not authorized it returns true, to account for the DR case (ESS)", args: args{ - key: "object", - d: NewResourceData(t, ResDataParams{ - ID: "id", - Schema: schemaMap, - State: map[string]interface{}{ - "object": []interface{}{"a", "b"}, - }, - Change: map[string]interface{}{ - "object": []interface{}{"b"}, - }, - }), + err: &apierror.Error{Err: &runtime.APIError{Code: 403}}, }, - want: false, + want: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := ObjectRemoved(tt.args.d, tt.args.key); got != tt.want { - t.Errorf("ObjectRemoved() = %v, want %v", got, tt.want) + if got := deploymentNotFound(tt.args.err); got != tt.want { + t.Errorf("deploymentNotFound() = %v, want %v", got, tt.want) } }) } diff --git a/ec/ecresource/deploymentresource/deployment_test.go b/ec/ecresource/deploymentresource/deployment_test.go new file mode 100644 index 000000000..d4c2804a4 --- /dev/null +++ b/ec/ecresource/deploymentresource/deployment_test.go @@ -0,0 +1,198 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package deploymentresource_test + +import ( + "encoding/json" + "fmt" + "io" + "net/url" + "os" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + r "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + + provider "github.com/elastic/terraform-provider-ec/ec" +) + +func Test_createDeploymentWithEmptyFields(t *testing.T) { + requestId := "cuchxqanal0g8rmx9ljog7qrrpd68iitulaz2mrch1vuuihetgo5ge3f6555vn4s" + + deploymentWithDefaultsIoOptimized := fmt.Sprintf(` + resource "ec_deployment" "empty-declarations-IO-Optimized" { + request_id = "%s" + name = "my_deployment_name" + deployment_template_id = "aws-io-optimized-v2" + region = "us-east-1" + version = "8.4.3" + + elasticsearch = { + topology = { + "hot_content" = { + size = "8g" + autoscaling = {} + } + } + } + }`, + requestId, + ) + + createDeploymentResponseJson := []byte(` + { + "alias": "my-deployment-name", + "created": true, + "id": "accd2e61fa835a5a32bb6b2938ce91f3", + "resources": [ + { + "kind": "elasticsearch", + "cloud_id": "my_deployment_name:cloud_id", + "region": "us-east-1", + "ref_id": "main-elasticsearch", + "credentials": { + "username": "elastic", + "password": "password" + }, + "id": "resource_id" + } + ], + "name": "my_deployment_name" + } + `) + + templateFileName := "testdata/aws-io-optimized-v2.json" + + r.UnitTest(t, 
r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + getTemplate(t, templateFileName), + createDeployment(t, readFile(t, "testdata/aws-io-optimized-v2-empty-config-create-expected-payload.json"), createDeploymentResponseJson, requestId), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment1.json")), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment2.json")), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json")), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json")), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json")), + mock.New202Response(io.NopCloser(strings.NewReader(""))), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json")), + readRemoteClusters(t), + mock.New200Response(readTestData(t, "testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json")), + readRemoteClusters(t), + shutdownDeployment(t), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: deploymentWithDefaultsIoOptimized, + }, + }, + }) +} + +func getTemplate(t *testing.T, filename string) mock.Response { + return mock.New200ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultReadMockHeaders, + Method: "GET", + Path: "/api/v1/deployments/templates/aws-io-optimized-v2", + Query: url.Values{"region": {"us-east-1"}, "show_instance_configurations": {"false"}}, + }, + readTestData(t, filename), + ) +} + +func readFile(t *testing.T, fileName string) []byte { + t.Helper() + res, err := os.ReadFile(fileName) + if err != nil { + t.Fatalf(err.Error()) + } + return res +} + +func readTestData(t *testing.T, filename string) io.ReadCloser { + t.Helper() + f, err := os.Open(filename) 
+ if err != nil { + t.Fatalf(err.Error()) + } + return f +} + +func createDeployment(t *testing.T, expectedRequestJson, responseJson []byte, requestId string) mock.Response { + t.Helper() + var expectedRequest *models.DeploymentCreateRequest + err := json.Unmarshal(expectedRequestJson, &expectedRequest) + if err != nil { + t.Fatalf(err.Error()) + } + + var response *models.DeploymentCreateResponse + err = json.Unmarshal(responseJson, &response) + if err != nil { + t.Fatalf(err.Error()) + } + + return mock.New201ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultWriteMockHeaders, + Method: "POST", + Path: "/api/v1/deployments", + Query: url.Values{"request_id": {requestId}}, + Body: mock.NewStructBody(expectedRequest), + }, + mock.NewStructBody(response), + ) +} + +func shutdownDeployment(t *testing.T) mock.Response { + t.Helper() + + return mock.New201ResponseAssertion( + &mock.RequestAssertion{ + Host: api.DefaultMockHost, + Header: api.DefaultWriteMockHeaders, + Method: "POST", + Path: "/api/v1/deployments/accd2e61fa835a5a32bb6b2938ce91f3/_shutdown", + Query: url.Values{"skip_snapshot": {"false"}}, + Body: io.NopCloser(strings.NewReader("")), + }, + io.NopCloser(strings.NewReader("")), + ) +} + +func readRemoteClusters(t *testing.T) mock.Response { + + return mock.New200StructResponse( + &models.RemoteResources{Resources: []*models.RemoteResourceRef{}}, + ) +} + +func protoV6ProviderFactoriesWithMockClient(client *api.API) map[string]func() (tfprotov6.ProviderServer, error) { + return map[string]func() (tfprotov6.ProviderServer, error){ + "ec": providerserver.NewProtocol6WithError(provider.ProviderWithClient(client, "unit-tests")), + } +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch.go new file mode 100644 index 000000000..58c04fc5d --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch.go @@ 
-0,0 +1,60 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchTF struct { + Autoscale types.String `tfsdk:"autoscale"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + CloudID types.String `tfsdk:"cloud_id"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.List `tfsdk:"topology"` + Config types.List `tfsdk:"config"` + RemoteCluster types.Set `tfsdk:"remote_cluster"` + SnapshotSource types.List `tfsdk:"snapshot_source"` + Extension types.Set `tfsdk:"extension"` + TrustAccount types.Set `tfsdk:"trust_account"` + TrustExternal types.Set `tfsdk:"trust_external"` + Strategy types.List `tfsdk:"strategy"` +} + +type Elasticsearch struct { + Autoscale *string `tfsdk:"autoscale"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + CloudID *string `tfsdk:"cloud_id"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology ElasticsearchTopologies `tfsdk:"topology"` + 
Config ElasticsearchConfigs `tfsdk:"config"` + RemoteCluster ElasticsearchRemoteClusters `tfsdk:"remote_cluster"` + SnapshotSource ElasticsearchSnapshotSources `tfsdk:"snapshot_source"` + Extension ElasticsearchExtensions `tfsdk:"extension"` + TrustAccount ElasticsearchTrustAccounts `tfsdk:"trust_account"` + TrustExternal ElasticsearchTrustExternals `tfsdk:"trust_external"` + Strategy ElasticsearchStrategies `tfsdk:"strategy"` +} + +type Elasticsearches []Elasticsearch diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_config.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_config.go new file mode 100644 index 000000000..d8878b9a0 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_config.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchConfigTF struct { + Plugins types.Set `tfsdk:"plugins"` + DockerImage types.String `tfsdk:"docker_image"` + UserSettingsJson types.String `tfsdk:"user_settings_json"` + UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"` + UserSettingsYaml types.String `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"` +} + +type ElasticsearchConfig struct { + Plugins []string `tfsdk:"plugins"` + DockerImage *string `tfsdk:"docker_image"` + UserSettingsJson *string `tfsdk:"user_settings_json"` + UserSettingsOverrideJson *string `tfsdk:"user_settings_override_json"` + UserSettingsYaml *string `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml *string `tfsdk:"user_settings_override_yaml"` +} + +type ElasticsearchConfigs []ElasticsearchConfig diff --git a/ec/ecresource/trafficfilterassocresource/resource.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_extension.go similarity index 60% rename from ec/ecresource/trafficfilterassocresource/resource.go rename to ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_extension.go index 4a402d894..800c9cf60 100644 --- a/ec/ecresource/trafficfilterassocresource/resource.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_extension.go @@ -15,26 +15,26 @@ // specific language governing permissions and limitations // under the License. -package trafficfilterassocresource +package v1 import ( - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// Resource returns the ec_deployment_traffic_filter_association resource schema. 
-func Resource() *schema.Resource { - return &schema.Resource{ - Description: "Elastic Cloud deployment traffic filtering association", - Schema: newSchema(), +type ElasticsearchExtensionTF struct { + Name types.String `tfsdk:"name"` + Type types.String `tfsdk:"type"` + Version types.String `tfsdk:"version"` + Url types.String `tfsdk:"url"` +} - CreateContext: create, - ReadContext: read, - DeleteContext: delete, +type ElasticsearchExtensionsTF types.Set - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(10 * time.Minute), - }, - } +type ElasticsearchExtension struct { + Name string `tfsdk:"name"` + Type string `tfsdk:"type"` + Version string `tfsdk:"version"` + Url string `tfsdk:"url"` } + +type ElasticsearchExtensions []ElasticsearchExtension diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_remote_cluster.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_remote_cluster.go new file mode 100644 index 000000000..c2da22ee3 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_remote_cluster.go @@ -0,0 +1,38 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchRemoteClusterTF struct { + DeploymentId types.String `tfsdk:"deployment_id"` + Alias types.String `tfsdk:"alias"` + RefId types.String `tfsdk:"ref_id"` + SkipUnavailable types.Bool `tfsdk:"skip_unavailable"` +} + +type ElasticsearchRemoteCluster struct { + DeploymentId *string `tfsdk:"deployment_id"` + Alias *string `tfsdk:"alias"` + RefId *string `tfsdk:"ref_id"` + SkipUnavailable *bool `tfsdk:"skip_unavailable"` +} + +type ElasticsearchRemoteClusters []ElasticsearchRemoteCluster diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_snapshot_source.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_snapshot_source.go new file mode 100644 index 000000000..cfb60f616 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_snapshot_source.go @@ -0,0 +1,34 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchSnapshotSourceTF struct { + SourceElasticsearchClusterId types.String `tfsdk:"source_elasticsearch_cluster_id"` + SnapshotName types.String `tfsdk:"snapshot_name"` +} + +type ElasticsearchSnapshotSource struct { + SourceElasticsearchClusterId string `tfsdk:"source_elasticsearch_cluster_id"` + SnapshotName string `tfsdk:"snapshot_name"` +} + +type ElasticsearchSnapshotSources []ElasticsearchSnapshotSource diff --git a/ec/ecresource/elasticsearchkeystoreresource/testutils.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_strategy.go similarity index 68% rename from ec/ecresource/elasticsearchkeystoreresource/testutils.go rename to ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_strategy.go index 6fc80835b..51cabca9f 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/testutils.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_strategy.go @@ -15,22 +15,20 @@ // specific language governing permissions and limitations // under the License. 
-package elasticsearchkeystoreresource +package v1 import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/types" ) -type resDataParams struct { - Resources map[string]interface{} - ID string +type ElasticsearchStrategyTF struct { + Type types.String `tfsdk:"type"` } -func newResourceData(t *testing.T, params resDataParams) *schema.ResourceData { - raw := schema.TestResourceDataRaw(t, newSchema(), params.Resources) - raw.SetId(params.ID) +type ElasticsearchStrategiesTF types.List - return raw +type ElasticsearchStrategy struct { + Type string `tfsdk:"type"` } + +type ElasticsearchStrategies []ElasticsearchStrategy diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology.go new file mode 100644 index 000000000..2b2b7c3cc --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology.go @@ -0,0 +1,54 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchTopologyTF struct { + Id types.String `tfsdk:"id"` + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + NodeTypeData types.String `tfsdk:"node_type_data"` + NodeTypeMaster types.String `tfsdk:"node_type_master"` + NodeTypeIngest types.String `tfsdk:"node_type_ingest"` + NodeTypeMl types.String `tfsdk:"node_type_ml"` + NodeRoles types.Set `tfsdk:"node_roles"` + Autoscaling types.List `tfsdk:"autoscaling"` + Config types.List `tfsdk:"config"` +} + +type ElasticsearchTopology struct { + Id string `tfsdk:"id"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + NodeTypeData *string `tfsdk:"node_type_data"` + NodeTypeMaster *string `tfsdk:"node_type_master"` + NodeTypeIngest *string `tfsdk:"node_type_ingest"` + NodeTypeMl *string `tfsdk:"node_type_ml"` + NodeRoles []string `tfsdk:"node_roles"` + Autoscaling ElasticsearchTopologyAutoscalings `tfsdk:"autoscaling"` + Config ElasticsearchTopologyConfigs `tfsdk:"config"` +} + +type ElasticsearchTopologies []ElasticsearchTopology diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_autoscaling.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_autoscaling.go new file mode 100644 index 000000000..653c44e89 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_autoscaling.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchTopologyAutoscalingTF struct { + MaxSizeResource types.String `tfsdk:"max_size_resource"` + MaxSize types.String `tfsdk:"max_size"` + MinSizeResource types.String `tfsdk:"min_size_resource"` + MinSize types.String `tfsdk:"min_size"` + PolicyOverrideJson types.String `tfsdk:"policy_override_json"` +} + +type ElasticsearchTopologyAutoscaling struct { + MaxSizeResource *string `tfsdk:"max_size_resource"` + MaxSize *string `tfsdk:"max_size"` + MinSizeResource *string `tfsdk:"min_size_resource"` + MinSize *string `tfsdk:"min_size"` + PolicyOverrideJson *string `tfsdk:"policy_override_json"` +} + +type ElasticsearchTopologyAutoscalings []ElasticsearchTopologyAutoscaling diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_config.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_config.go new file mode 100644 index 000000000..a7fe4c800 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_topology_config.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchTopologyConfigTF struct { + Plugins types.Set `tfsdk:"plugins"` + UserSettingsJson types.String `tfsdk:"user_settings_json"` + UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"` + UserSettingsYaml types.String `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"` +} + +type ElasticsearchTopologyConfig struct { + Plugins []string `tfsdk:"plugins"` + UserSettingsJson *string `tfsdk:"user_settings_json"` + UserSettingsOverrideJson *string `tfsdk:"user_settings_override_json"` + UserSettingsYaml *string `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml *string `tfsdk:"user_settings_override_yaml"` +} + +type ElasticsearchTopologyConfigs []ElasticsearchTopologyConfig diff --git a/ec/ecresource/elasticsearchkeystoreresource/resource.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_account.go similarity index 59% rename from ec/ecresource/elasticsearchkeystoreresource/resource.go rename to ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_account.go index 70fefacb0..a386cee0b 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/resource.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_account.go @@ -15,27 +15,24 @@ // specific 
language governing permissions and limitations // under the License. -package elasticsearchkeystoreresource +package v1 import ( - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// Resource returns the ec_deployment_elasticsearch_keystore resource schema. -func Resource() *schema.Resource { - return &schema.Resource{ - Description: "Elastic Cloud deployment Elasticsearch keystore", - Schema: newSchema(), +type ElasticsearchTrustAccountTF struct { + AccountId types.String `tfsdk:"account_id"` + TrustAll types.Bool `tfsdk:"trust_all"` + TrustAllowlist types.Set `tfsdk:"trust_allowlist"` +} - CreateContext: create, - ReadContext: read, - UpdateContext: update, - DeleteContext: delete, +type ElasticsearchTrustAccountsTF types.Set - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(5 * time.Minute), - }, - } +type ElasticsearchTrustAccount struct { + AccountId *string `tfsdk:"account_id"` + TrustAll *bool `tfsdk:"trust_all"` + TrustAllowlist []string `tfsdk:"trust_allowlist"` } + +type ElasticsearchTrustAccounts []ElasticsearchTrustAccount diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_external.go b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_external.go new file mode 100644 index 000000000..4c20cd1be --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/elasticsearch_trust_external.go @@ -0,0 +1,36 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchTrustExternalTF struct { + RelationshipId types.String `tfsdk:"relationship_id"` + TrustAll types.Bool `tfsdk:"trust_all"` + TrustAllowlist types.Set `tfsdk:"trust_allowlist"` +} + +type ElasticsearchTrustExternal struct { + RelationshipId *string `tfsdk:"relationship_id"` + TrustAll *bool `tfsdk:"trust_all"` + TrustAllowlist []string `tfsdk:"trust_allowlist"` +} + +type ElasticsearchTrustExternals []ElasticsearchTrustExternal diff --git a/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go new file mode 100644 index 000000000..c2abf03ae --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v1/schema.go @@ -0,0 +1,509 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "strings" + + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// These constants are only used to determine whether or not a dedicated +// tier of masters or ingest (coordinating) nodes are set. +const ( + dataTierRolePrefix = "data_" + ingestDataTierRole = "ingest" + masterDataTierRole = "master" + autodetect = "autodetect" + growAndShrink = "grow_and_shrink" + rollingGrowAndShrink = "rolling_grow_and_shrink" + rollingAll = "rolling_all" +) + +// List of update strategies availables. +var strategiesList = []string{ + autodetect, growAndShrink, rollingGrowAndShrink, rollingAll, +} + +func ElasticsearchSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Required Elasticsearch resource definition", + Required: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "autoscale": { + Type: types.StringType, + Description: `Enable or disable autoscaling. Defaults to the setting coming from the deployment template. 
Accepted values are "true" or "false".`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "ref_id": { + Type: types.StringType, + Description: "Optional ref_id to set on the Elasticsearch resource", + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + }, + }, + "resource_id": { + Type: types.StringType, + Description: "The Elasticsearch resource unique identifier", + Computed: true, + }, + "region": { + Type: types.StringType, + Description: "The Elasticsearch resource region", + Computed: true, + }, + "cloud_id": { + Type: types.StringType, + Description: "The encoded Elasticsearch credentials to use in Beats or Logstash", + Computed: true, + }, + "http_endpoint": { + Type: types.StringType, + Description: "The Elasticsearch resource HTTP endpoint", + Computed: true, + }, + "https_endpoint": { + Type: types.StringType, + Description: "The Elasticsearch resource HTTPs endpoint", + Computed: true, + }, + "topology": ElasticsearchTopologySchema(), + + "trust_account": ElasticsearchTrustAccountSchema(), + + "trust_external": ElasticsearchTrustExternalSchema(), + + "config": ElasticsearchConfigSchema(), + + "remote_cluster": ElasticsearchRemoteClusterSchema(), + + "snapshot_source": ElasticsearchSnapshotSourceSchema(), + + "extension": ElasticsearchExtensionSchema(), + + "strategy": ElasticsearchStrategySchema(), + }), + } +} + +func ElasticsearchConfigSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: `Optional Elasticsearch settings which will be applied to all topologies unless overridden on the topology element`, + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image 
the Elasticsearch nodes will use. This option will not work in ESS customers and should only be changed if you know what you're doing.", + Optional: true, + }, + "plugins": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Description: "List of Elasticsearch supported plugins, which vary from version to version. Check the Stack Pack version to see which plugins are supported for each version. This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", + Optional: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + }), + } +} + +func ElasticsearchTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Computed: true, + Optional: true, + Description: `Optional topology element which must be set once but can be set multiple times to compose complex topologies`, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "id": { + Type: types.StringType, + Description: `Required topology ID from the deployment template`, + Required: true, + }, + "instance_configuration_id": { + Type: types.StringType, + Description: `Computed Instance Configuration ID of the topology element`, + Computed: true, + }, + "size": { + Type: types.StringType, + Description: `Optional amount of memory per 
node in the "g" notation`, + Computed: true, + Optional: true, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + }, + }, + "zone_count": { + Type: types.Int64Type, + Description: `Optional number of zones that the Elasticsearch cluster will span. This is used to set HA`, + Computed: true, + Optional: true, + }, + "node_type_data": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (data node)`, + Computed: true, + Optional: true, + }, + "node_type_master": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (master node)`, + Computed: true, + Optional: true, + }, + "node_type_ingest": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (ingest node)`, + Computed: true, + Optional: true, + }, + "node_type_ml": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (machine learning node)`, + Computed: true, + Optional: true, + }, + "node_roles": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Description: `The computed list of node roles for the current topology element`, + Computed: true, + }, + "autoscaling": ElasticsearchTopologyAutoscalingSchema(), + "config": ElasticsearchTopologyConfigSchema(), + }), + } +} + +func ElasticsearchTopologyAutoscalingSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", + Optional: true, + Computed: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "max_size_resource": { + Description: "Maximum resource type for the maximum autoscaling 
setting.", + Type: types.StringType, + Optional: true, + Computed: true, + }, + "max_size": { + Description: "Maximum size value for the maximum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + }, + "min_size_resource": { + Description: "Minimum resource type for the minimum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + }, + "min_size": { + Description: "Minimum size value for the minimum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + }, + "policy_override_json": { + Type: types.StringType, + Description: "Computed policy overrides set directly via the API or other clients.", + Computed: true, + }, + }), + } +} + +func ElasticsearchRemoteClusterSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch remote clusters to configure for the Elasticsearch resource, can be set multiple times", + Optional: true, + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "deployment_id": { + Description: "Remote deployment ID", + Type: types.StringType, + Required: true, + }, + "alias": { + Description: "Alias for this Cross Cluster Search binding", + Type: types.StringType, + Required: true, + }, + "ref_id": { + Description: `Remote elasticsearch "ref_id", it is best left to the default value`, + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + }, + Optional: true, + }, + "skip_unavailable": { + Description: "If true, skip the cluster during search when disconnected", + Type: types.BoolType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), + }, + Optional: true, + }, + }), + } +} + +func ElasticsearchSnapshotSourceSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional snapshot source settings. 
Restore data from a snapshot of another deployment.", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "source_elasticsearch_cluster_id": { + Description: "ID of the Elasticsearch cluster that will be used as the source of the snapshot", + Type: types.StringType, + Required: true, + }, + "snapshot_name": { + Description: "Name of the snapshot to restore. Use '__latest_success__' to get the most recent successful snapshot.", + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "__latest_success__"}), + }, + Optional: true, + Computed: true, + }, + }), + } +} + +func ElasticsearchExtensionSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch extensions such as custom bundles or plugins.", + Optional: true, + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "name": { + Description: "Extension name.", + Type: types.StringType, + Required: true, + }, + "type": { + Description: "Extension type, only `bundle` or `plugin` are supported.", + Type: types.StringType, + Required: true, + Validators: []tfsdk.AttributeValidator{stringvalidator.OneOf("bundle", "plugin")}, + }, + "version": { + Description: "Elasticsearch compatibility version. 
Bundles should specify major or minor versions with wildcards, such as `7.*` or `*` but **plugins must use full version notation down to the patch level**, such as `7.10.1` and wildcards are not allowed.", + Type: types.StringType, + Required: true, + }, + "url": { + Description: "Bundle or plugin URL, the extension URL can be obtained from the `ec_deployment_extension..url` attribute or the API and cannot be a random HTTP address that is hosted elsewhere.", + Type: types.StringType, + Required: true, + }, + }), + } +} + +func ElasticsearchTrustAccountSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch account trust settings.", + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "account_id": { + Description: "The ID of the Account.", + Type: types.StringType, + Required: true, + }, + "trust_all": { + Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", + Type: types.BoolType, + Required: true, + }, + "trust_allowlist": { + Description: "The list of clusters to trust. Only used when `trust_all` is false.", + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + }, + }), + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + } +} + +func ElasticsearchTrustExternalSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch external trust settings.", + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "relationship_id": { + Description: "The ID of the external trust relationship.", + Type: types.StringType, + Required: true, + }, + "trust_all": { + Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", + Type: types.BoolType, + Required: true, + }, + "trust_allowlist": { + Description: "The list of clusters to trust. 
Only used when `trust_all` is false.", + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + }, + }), + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + } +} + +func ElasticsearchStrategySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Configuration strategy settings.", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "type": { + Description: "Configuration strategy type " + strings.Join(strategiesList, ", "), + Type: types.StringType, + Required: true, + Validators: []tfsdk.AttributeValidator{stringvalidator.OneOf("bundle", "plugin")}, + }, + }), + } +} + +func ElasticsearchTopologyConfigSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: `Computed read-only configuration to avoid unsetting plan settings from 'topology.elasticsearch'`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + planmodifier.DefaultValue(types.List{ + Null: true, + ElemType: types.ObjectType{ + AttrTypes: map[string]attr.Type{ + "plugins": types.SetType{ + ElemType: types.StringType, + }, + "user_settings_json": types.StringType, + "user_settings_override_json": types.StringType, + "user_settings_yaml": types.StringType, + "user_settings_override_yaml": types.StringType, + }, + }, + }), + }, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "plugins": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Description: "List of Elasticsearch supported plugins, which vary from version to version. Check the Stack Pack version to see which plugins are supported for each version. 
This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "user_settings_json": { + Type: types.StringType, + Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_config.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_config.go new file mode 100644 index 000000000..b85298032 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_config.go @@ -0,0 +1,120 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "bytes" + "context" + "encoding/json" + "reflect" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +type ElasticsearchConfig v1.ElasticsearchConfig + +func (c *ElasticsearchConfig) IsEmpty() bool { + return c == nil || reflect.ValueOf(*c).IsZero() +} + +func readElasticsearchConfig(in *models.ElasticsearchConfiguration) (*ElasticsearchConfig, error) { + var config ElasticsearchConfig + + if in == nil { + return &ElasticsearchConfig{}, nil + } + + if len(in.EnabledBuiltInPlugins) > 0 { + config.Plugins = append(config.Plugins, in.EnabledBuiltInPlugins...) 
+ } + + if in.UserSettingsYaml != "" { + config.UserSettingsYaml = &in.UserSettingsYaml + } + + if in.UserSettingsOverrideYaml != "" { + config.UserSettingsOverrideYaml = &in.UserSettingsOverrideYaml + } + + if o := in.UserSettingsJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + config.UserSettingsJson = ec.String(string(b)) + } + } + + if o := in.UserSettingsOverrideJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + config.UserSettingsOverrideJson = ec.String(string(b)) + } + } + + if in.DockerImage != "" { + config.DockerImage = ec.String(in.DockerImage) + } + + return &config, nil +} + +func elasticsearchConfigPayload(ctx context.Context, cfgObj attr.Value, model *models.ElasticsearchConfiguration) (*models.ElasticsearchConfiguration, diag.Diagnostics) { + if cfgObj.IsNull() || cfgObj.IsUnknown() { + return model, nil + } + + var cfg v1.ElasticsearchConfigTF + + diags := tfsdk.ValueAs(ctx, cfgObj, &cfg) + + if diags.HasError() { + return nil, diags + } + + if cfg.UserSettingsJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsJson.Value), &model.UserSettingsJSON); err != nil { + diags.AddError("failed expanding elasticsearch user_settings_json", err.Error()) + } + } + + if cfg.UserSettingsOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsOverrideJson.Value), &model.UserSettingsOverrideJSON); err != nil { + diags.AddError("failed expanding elasticsearch user_settings_override_json", err.Error()) + } + } + + if !cfg.UserSettingsYaml.IsNull() { + model.UserSettingsYaml = cfg.UserSettingsYaml.Value + } + + if !cfg.UserSettingsOverrideYaml.IsNull() { + model.UserSettingsOverrideYaml = cfg.UserSettingsOverrideYaml.Value + } + + ds := cfg.Plugins.ElementsAs(ctx, &model.EnabledBuiltInPlugins, true) + + diags = append(diags, ds...) 
+ + if !cfg.DockerImage.IsNull() { + model.DockerImage = cfg.DockerImage.Value + } + + return model, diags +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_extension.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_extension.go new file mode 100644 index 000000000..c0faa0392 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_extension.go @@ -0,0 +1,136 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchExtensions v1.ElasticsearchExtensions + +func readElasticsearchExtensions(in *models.ElasticsearchConfiguration) (ElasticsearchExtensions, error) { + if len(in.UserBundles) == 0 && len(in.UserPlugins) == 0 { + return nil, nil + } + + extensions := make(ElasticsearchExtensions, 0, len(in.UserBundles)+len(in.UserPlugins)) + + for _, model := range in.UserBundles { + extension, err := readFromUserBundle(model) + if err != nil { + return nil, err + } + + extensions = append(extensions, *extension) + } + + for _, model := range in.UserPlugins { + extension, err := readFromUserPlugin(model) + if err != nil { + return nil, err + } + + extensions = append(extensions, *extension) + } + + return extensions, nil +} + +func elasticsearchExtensionPayload(ctx context.Context, extensions types.Set, es *models.ElasticsearchConfiguration) diag.Diagnostics { + for _, elem := range extensions.Elems { + var extension v1.ElasticsearchExtensionTF + + if diags := tfsdk.ValueAs(ctx, elem, &extension); diags.HasError() { + return diags + } + + version := extension.Version.Value + url := extension.Url.Value + name := extension.Name.Value + + if extension.Type.Value == "bundle" { + es.UserBundles = append(es.UserBundles, &models.ElasticsearchUserBundle{ + Name: &name, + ElasticsearchVersion: &version, + URL: &url, + }) + } + + if extension.Type.Value == "plugin" { + es.UserPlugins = append(es.UserPlugins, &models.ElasticsearchUserPlugin{ + Name: &name, + ElasticsearchVersion: &version, + URL: &url, + }) + } + } + return nil +} + +func 
readFromUserBundle(in *models.ElasticsearchUserBundle) (*v1.ElasticsearchExtension, error) { + var ext v1.ElasticsearchExtension + + ext.Type = "bundle" + + if in.ElasticsearchVersion == nil { + return nil, utils.MissingField("ElasticsearchUserBundle.ElasticsearchVersion") + } + ext.Version = *in.ElasticsearchVersion + + if in.URL == nil { + return nil, utils.MissingField("ElasticsearchUserBundle.URL") + } + ext.Url = *in.URL + + if in.Name == nil { + return nil, utils.MissingField("ElasticsearchUserBundle.Name") + } + ext.Name = *in.Name + + return &ext, nil +} + +func readFromUserPlugin(in *models.ElasticsearchUserPlugin) (*v1.ElasticsearchExtension, error) { + var ext v1.ElasticsearchExtension + + ext.Type = "plugin" + + if in.ElasticsearchVersion == nil { + return nil, utils.MissingField("ElasticsearchUserPlugin.ElasticsearchVersion") + } + ext.Version = *in.ElasticsearchVersion + + if in.URL == nil { + return nil, utils.MissingField("ElasticsearchUserPlugin.URL") + } + ext.Url = *in.URL + + if in.Name == nil { + return nil, utils.MissingField("ElasticsearchUserPlugin.Name") + } + ext.Name = *in.Name + + return &ext, nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go new file mode 100644 index 000000000..c9b263e40 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload.go @@ -0,0 +1,281 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "strings" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "golang.org/x/exp/slices" +) + +type ElasticsearchTF struct { + Autoscale types.Bool `tfsdk:"autoscale"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + CloudID types.String `tfsdk:"cloud_id"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.Map `tfsdk:"topology"` + Config types.Object `tfsdk:"config"` + RemoteCluster types.Set `tfsdk:"remote_cluster"` + SnapshotSource types.Object `tfsdk:"snapshot_source"` + Extension types.Set `tfsdk:"extension"` + TrustAccount types.Set `tfsdk:"trust_account"` + TrustExternal types.Set `tfsdk:"trust_external"` + Strategy types.String `tfsdk:"strategy"` +} + +func ElasticsearchPayload(ctx context.Context, esObj types.Object, template *models.DeploymentTemplateInfoV2, dtID, version string, useNodeRoles bool, skipTopologies bool) (*models.ElasticsearchPayload, diag.Diagnostics) { + var es *ElasticsearchTF + + if esObj.IsNull() || esObj.IsUnknown() { + return nil, nil + } + + if diags := tfsdk.ValueAs(ctx, esObj, &es); diags.HasError() { + return nil, diags + } + + if es == nil { + return nil, nil + } + + templatePayload := 
EnrichElasticsearchTemplate(payloadFromTemplate(template), dtID, version, useNodeRoles) + + payload, diags := es.payload(ctx, templatePayload, skipTopologies) + if diags.HasError() { + return nil, diags + } + + return payload, nil +} + +func (es *ElasticsearchTF) payload(ctx context.Context, res *models.ElasticsearchPayload, skipTopologies bool) (*models.ElasticsearchPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + if !es.RefId.IsNull() { + res.RefID = &es.RefId.Value + } + + if es.Region.Value != "" { + res.Region = &es.Region.Value + } + + // Unsetting the curation properties is since they're deprecated since + // >= 6.6.0 which is when ILM is introduced in Elasticsearch. + unsetElasticsearchCuration(res) + + var ds diag.Diagnostics + + if !skipTopologies { + diags.Append(es.topologiesPayload(ctx, res.Plan.ClusterTopology)...) + } + + // Fixes the node_roles field to remove the dedicated tier roles from the + // list when these are set as a dedicated tier as a topology element. + updateNodeRolesOnDedicatedTiers(res.Plan.ClusterTopology) + + res.Plan.Elasticsearch, ds = elasticsearchConfigPayload(ctx, es.Config, res.Plan.Elasticsearch) + diags.Append(ds...) + + diags.Append(elasticsearchSnapshotSourcePayload(ctx, es.SnapshotSource, res.Plan)...) + + diags.Append(elasticsearchExtensionPayload(ctx, es.Extension, res.Plan.Elasticsearch)...) + + if !es.Autoscale.IsNull() && !es.Autoscale.IsUnknown() { + res.Plan.AutoscalingEnabled = &es.Autoscale.Value + } + + res.Settings, ds = elasticsearchTrustAccountPayload(ctx, es.TrustAccount, res.Settings) + diags.Append(ds...) + + res.Settings, ds = elasticsearchTrustExternalPayload(ctx, es.TrustExternal, res.Settings) + diags.Append(ds...) 
+ + elasticsearchStrategyPayload(es.Strategy, res.Plan) + + return res, diags +} + +func (es *ElasticsearchTF) topologies(ctx context.Context) (map[string]ElasticsearchTopologyTF, diag.Diagnostics) { + var topologies map[string]ElasticsearchTopologyTF + if diags := es.Topology.ElementsAs(ctx, &topologies, true); diags.HasError() { + return nil, diags + } + + return topologies, nil +} + +func (es *ElasticsearchTF) topologiesPayload(ctx context.Context, topologyModels []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { + tiers, diags := es.topologies(ctx) + + if diags.HasError() { + return diags + } + + for id, tier := range tiers { + diags.Append(tier.payload(ctx, id, topologyModels)...) + } + + return diags +} + +func unsetElasticsearchCuration(payload *models.ElasticsearchPayload) { + if payload.Plan.Elasticsearch != nil { + payload.Plan.Elasticsearch.Curation = nil + } + + if payload.Settings != nil { + payload.Settings.Curation = nil + } +} + +func updateNodeRolesOnDedicatedTiers(topologies []*models.ElasticsearchClusterTopologyElement) { + dataTier, hasMasterTier, hasIngestTier := dedicatedTopoogies(topologies) + // This case is not very likely since all deployments will have a data tier. + // It's here because the code path is technically possible and it's better + // than a straight panic. 
+ if dataTier == nil { + return + } + + if hasIngestTier { + dataTier.NodeRoles = removeItemFromSlice( + dataTier.NodeRoles, ingestDataTierRole, + ) + } + if hasMasterTier { + dataTier.NodeRoles = removeItemFromSlice( + dataTier.NodeRoles, masterDataTierRole, + ) + } +} + +func removeItemFromSlice(slice []string, item string) []string { + i := slices.Index(slice, item) + + if i == -1 { + return slice + } + + return slices.Delete(slice, i, i+1) +} + +func dedicatedTopoogies(topologies []*models.ElasticsearchClusterTopologyElement) (dataTier *models.ElasticsearchClusterTopologyElement, hasMasterTier, hasIngestTier bool) { + for _, topology := range topologies { + var hasSomeDataRole bool + var hasMasterRole bool + var hasIngestRole bool + for _, role := range topology.NodeRoles { + sizeNonZero := *topology.Size.Value > 0 + if strings.HasPrefix(role, dataTierRolePrefix) && sizeNonZero { + hasSomeDataRole = true + } + if role == ingestDataTierRole && sizeNonZero { + hasIngestRole = true + } + if role == masterDataTierRole && sizeNonZero { + hasMasterRole = true + } + } + + if !hasSomeDataRole && hasMasterRole { + hasMasterTier = true + } + + if !hasSomeDataRole && hasIngestRole { + hasIngestTier = true + } + + if hasSomeDataRole && hasMasterRole { + dataTier = topology + } + } + + return dataTier, hasMasterTier, hasIngestTier +} + +func elasticsearchStrategyPayload(strategy types.String, payload *models.ElasticsearchClusterPlan) { + createModelIfNeeded := func() { + if payload.Transient == nil { + payload.Transient = &models.TransientElasticsearchPlanConfiguration{ + Strategy: &models.PlanStrategy{}, + } + } + } + + switch strategy.Value { + case autodetect: + createModelIfNeeded() + payload.Transient.Strategy.Autodetect = new(models.AutodetectStrategyConfig) + case growAndShrink: + createModelIfNeeded() + payload.Transient.Strategy.GrowAndShrink = new(models.GrowShrinkStrategyConfig) + case rollingGrowAndShrink: + createModelIfNeeded() + 
payload.Transient.Strategy.RollingGrowAndShrink = new(models.RollingGrowShrinkStrategyConfig) + case rollingAll: + createModelIfNeeded() + payload.Transient.Strategy.Rolling = &models.RollingStrategyConfig{ + GroupBy: "__all__", + } + } +} + +func payloadFromTemplate(template *models.DeploymentTemplateInfoV2) *models.ElasticsearchPayload { + if template == nil || len(template.DeploymentTemplate.Resources.Elasticsearch) == 0 { + return &models.ElasticsearchPayload{ + Plan: &models.ElasticsearchClusterPlan{ + Elasticsearch: &models.ElasticsearchConfiguration{}, + }, + Settings: &models.ElasticsearchClusterSettings{}, + } + } + return template.DeploymentTemplate.Resources.Elasticsearch[0] +} + +func EnrichElasticsearchTemplate(tpl *models.ElasticsearchPayload, templateId, version string, useNodeRoles bool) *models.ElasticsearchPayload { + if tpl.Plan.DeploymentTemplate == nil { + tpl.Plan.DeploymentTemplate = &models.DeploymentTemplateReference{} + } + + if tpl.Plan.DeploymentTemplate.ID == nil || *tpl.Plan.DeploymentTemplate.ID == "" { + tpl.Plan.DeploymentTemplate.ID = ec.String(templateId) + } + + if tpl.Plan.Elasticsearch.Version == "" { + tpl.Plan.Elasticsearch.Version = version + } + + for _, topology := range tpl.Plan.ClusterTopology { + if useNodeRoles { + topology.NodeType = nil + continue + } + topology.NodeRoles = nil + } + + return tpl +} diff --git a/ec/ecresource/deploymentresource/elasticsearch_expanders_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go similarity index 73% rename from ec/ecresource/deploymentresource/elasticsearch_expanders_test.go rename to ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go index 26a473c30..e31e8c215 100644 --- a/ec/ecresource/deploymentresource/elasticsearch_expanders_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_payload_test.go @@ -15,24 +15,29 @@ // specific language governing permissions and limitations // under 
the License. -package deploymentresource +package v2 import ( - "errors" + "context" "testing" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" ) -func Test_expandEsResource(t *testing.T) { - tplPath := "testdata/template-aws-io-optimized-v2.json" +func Test_writeElasticsearch(t *testing.T) { + tplPath := "../../testdata/template-aws-io-optimized-v2.json" tp770 := func() *models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, tplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, tplPath)), "aws-io-optimized-v2", "7.7.0", false, @@ -40,8 +45,8 @@ func Test_expandEsResource(t *testing.T) { } create710 := func() *models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, tplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, tplPath)), "aws-io-optimized-v2", "7.10.0", true, @@ -49,18 +54,18 @@ func Test_expandEsResource(t *testing.T) { } update711 := func() *models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, tplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, tplPath)), "aws-io-optimized-v2", "7.11.0", true, ) } - hotWarmTplPath := "testdata/template-aws-hot-warm-v2.json" + hotWarmTplPath := "../../testdata/template-aws-hot-warm-v2.json" hotWarmTpl770 := func() 
*models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, hotWarmTplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, hotWarmTplPath)), "aws-io-optimized-v2", "7.7.0", false, @@ -68,18 +73,18 @@ func Test_expandEsResource(t *testing.T) { } hotWarm7111Tpl := func() *models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, hotWarmTplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, hotWarmTplPath)), "aws-io-optimized-v2", "7.11.1", true, ) } - eceDefaultTplPath := "testdata/template-ece-3.0.0-default.json" + eceDefaultTplPath := "../../testdata/template-ece-3.0.0-default.json" eceDefaultTpl := func() *models.ElasticsearchPayload { - return enrichElasticsearchTemplate( - esResource(parseDeploymentTemplate(t, eceDefaultTplPath)), + return EnrichElasticsearchTemplate( + payloadFromTemplate(testutil.ParseDeploymentTemplate(t, eceDefaultTplPath)), "aws-io-optimized-v2", "7.17.3", true, @@ -87,37 +92,38 @@ func Test_expandEsResource(t *testing.T) { } type args struct { - ess []interface{} - dt *models.ElasticsearchPayload + es Elasticsearch + template *models.DeploymentTemplateInfoV2 + templateID string + version string + useNodeRoles bool } tests := []struct { - name string - args args - want []*models.ElasticsearchPayload - err error + name string + args args + want *models.ElasticsearchPayload + diags diag.Diagnostics }{ - { - name: "returns nil when there's no resources", - }, { name: "parses an ES resource", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, + es: Elasticsearch{ + RefId: 
ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + Size: ec.String("2g"), + ZoneCount: 1, + }, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -168,21 +174,23 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource with empty version (7.10.0) in state uses node_roles from the DT", args: args{ - dt: create710(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + Size: ec.String("2g"), + ZoneCount: 1, + }, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.10.0", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(create710(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(create710(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -236,24 +244,24 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource with version 7.11.0 has node_roles coming from the saved 
state", args: args{ - dt: update711(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - "node_roles": schema.NewSet(schema.HashString, []interface{}{ - "a", "b", "c", - }), - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + Size: ec.String("2g"), + ZoneCount: 1, + NodeRoles: []string{"a", "b", "c"}, + }, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.11.0", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(update711(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(update711(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -299,40 +307,20 @@ func Test_expandEsResource(t *testing.T) { }, }), }, - { - name: "parses an ES resource with invalid id", - args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "invalid", - "size": "2g", - "zone_count": 1, - }}, - }, - }, - }, - err: errors.New(`elasticsearch topology invalid: invalid id: valid topology IDs are "coordinating", "hot_content", "warm", "cold", "master", "ml"`), - }, { name: "parses an ES resource without a topology", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - 
}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -383,29 +371,27 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource (HotWarm)", args: args{ - dt: hotWarmTpl770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "deployment_template_id": "aws-hot-warm-v2", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }, - map[string]interface{}{ - "id": "warm", - "size": "2g", - "zone_count": 1, - }, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + Size: ec.String("2g"), + ZoneCount: 1, + }, + "warm": { + Size: ec.String("2g"), + ZoneCount: 1, }, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-hot-warm-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -487,33 +473,30 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES 
resource with config (HotWarm)", args: args{ - dt: hotWarmTpl770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "deployment_template_id": "aws-hot-warm-v2", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "somesetting: true", - }}, - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }, - map[string]interface{}{ - "id": "warm", - "size": "2g", - "zone_count": 1, - }, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Config: &ElasticsearchConfig{ + UserSettingsYaml: ec.String("somesetting: true"), + }, + Topology: ElasticsearchTopologies{ + "hot_content": { + Size: ec.String("2g"), + ZoneCount: 1, + }, + "warm": { + Size: ec.String("2g"), + ZoneCount: 1, }, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-hot-warm-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -593,126 +576,20 @@ func Test_expandEsResource(t *testing.T) { }, }), }, - { - name: "parses an ES resource with explicit nils", - args: args{ - dt: hotWarmTpl770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "deployment_template_id": "aws-hot-warm-v2", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": nil, - }}, - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - 
"size": nil, - "zone_count": 1, - }, - map[string]interface{}{ - "id": "warm", - "size": "2g", - "zone_count": nil, - }, - }, - }, - }, - }, - want: enrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ - Region: ec.String("some-region"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Curation: nil, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - Curation: nil, - UserSettingsYaml: "", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "hot", - }, - }, - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - 
Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, { name: "parses an ES resource without a topology (HotWarm)", args: args{ - dt: hotWarmTpl770(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-hot-warm-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -794,27 +671,28 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource with node type overrides (HotWarm)", args: args{ - dt: hotWarmTpl770(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "node_type_data": "false", - "node_type_master": "false", - "node_type_ingest": "false", - "node_type_ml": "true", - }, - map[string]interface{}{ - "id": "warm", - "node_type_master": "true", - }, - }, - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + NodeTypeData: ec.String("false"), + NodeTypeMaster: ec.String("false"), + NodeTypeIngest: ec.String("false"), + NodeTypeMl: ec.String("true"), + }, + "warm": { + NodeTypeMaster: 
ec.String("true"), + }, + }, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-hot-warm-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarmTpl770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -897,31 +775,31 @@ func Test_expandEsResource(t *testing.T) { { name: "migrates old node_type state to new node_roles payload when the cold tier is set", args: args{ - dt: hotWarm7111Tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "node_type_data": "false", - "node_type_master": "false", - "node_type_ingest": "false", - "node_type_ml": "true", - }, - map[string]interface{}{ - "id": "warm", - "node_type_master": "true", - }, - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - }, - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + NodeTypeData: ec.String("false"), + NodeTypeMaster: ec.String("false"), + NodeTypeIngest: ec.String("false"), + NodeTypeMl: ec.String("true"), + }, + "warm": { + NodeTypeMaster: ec.String("true"), + }, + "cold": { + Size: ec.String("2g"), + }, + }, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.11.1", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: 
ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1033,27 +911,23 @@ func Test_expandEsResource(t *testing.T) { { name: "autoscaling enabled", args: args{ - dt: hotWarm7111Tpl(), - ess: []interface{}{map[string]interface{}{ - "autoscale": "true", - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - }, - map[string]interface{}{ - "id": "warm", - }, - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, + es: Elasticsearch{ + Autoscale: ec.Bool(true), + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": {}, + "warm": {}, + "cold": {Size: ec.String("2g")}, }, - }}, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.11.1", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1165,52 +1039,43 @@ func Test_expandEsResource(t *testing.T) { { name: "autoscaling enabled overriding the size with ml", args: args{ - dt: hotWarm7111Tpl(), - ess: []interface{}{map[string]interface{}{ - "autoscale": "true", - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "58g", - }, + es: Elasticsearch{ + Autoscale: ec.Bool(true), + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + 
Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("58g"), }, }, - map[string]interface{}{ - "id": "warm", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "29g", - }, + "warm": { + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("29g"), }, }, - map[string]interface{}{ - "id": "cold", - "size": "2g", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "29g", - }, + "cold": { + Size: ec.String("2g"), + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("29g"), }, }, - map[string]interface{}{ - "id": "ml", - "size": "1g", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "29g", - "min_size": "1g", - }, + "ml": { + Size: ec.String("1g"), + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("29g"), + MinSize: ec.String("1g"), }, }, }, - }}, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.11.1", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1349,35 +1214,32 @@ func Test_expandEsResource(t *testing.T) { { name: "autoscaling enabled no dimension in template, default resource", args: args{ - dt: eceDefaultTpl(), - ess: []interface{}{map[string]interface{}{ - "autoscale": "true", - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "450g", - "min_size": "2g", - }, + es: Elasticsearch{ + 
Autoscale: ec.Bool(true), + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("450g"), + MinSize: ec.String("2g"), }, }, - map[string]interface{}{ - "id": "master", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "250g", - "min_size": "1g", - }, + "master": { + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("250g"), + MinSize: ec.String("1g"), }, }, }, - }}, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-ece-3.0.0-default.json"), + templateID: "aws-io-optimized-v2", + version: "7.17.3", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(eceDefaultTpl(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(eceDefaultTpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1464,48 +1326,41 @@ func Test_expandEsResource(t *testing.T) { { name: "autoscaling enabled overriding the size and resources", args: args{ - dt: hotWarm7111Tpl(), - ess: []interface{}{map[string]interface{}{ - "autoscale": "true", - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size_resource": "storage", - "max_size": "450g", - }, + es: Elasticsearch{ + Autoscale: ec.Bool(true), + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("450g"), + MaxSizeResource: ec.String("storage"), }, }, - map[string]interface{}{ - "id": 
"warm", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size_resource": "storage", - "max_size": "870g", - }, + "warm": { + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("870g"), + MaxSizeResource: ec.String("storage"), }, }, - map[string]interface{}{ - "id": "cold", - "size": "4g", - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size_resource": "storage", - "max_size": "1740g", - - "min_size_resource": "storage", - "min_size": "4g", - }, + "cold": { + Size: ec.String("4g"), + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("1740g"), + MaxSizeResource: ec.String("storage"), + MinSizeResource: ec.String("storage"), + MinSize: ec.String("4g"), }, }, }, - }}, + }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-hot-warm-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.11.1", + useNodeRoles: true, }, - want: enrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(hotWarm7111Tpl(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1621,30 +1476,30 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource with plugins", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - "plugins": schema.NewSet(schema.HashString, []interface{}{ - "plugin", - }), - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, + es: Elasticsearch{ 
+ RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Config: &ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + Plugins: []string{"plugin"}, + }, + Topology: ElasticsearchTopologies{ + "hot_content": { + Size: ec.String("2g"), + ZoneCount: 1, + }, }, }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1702,25 +1557,27 @@ func Test_expandEsResource(t *testing.T) { { name: "parses an ES resource with snapshot settings", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "snapshot_source": []interface{}{map[string]interface{}{ - "snapshot_name": "__latest_success__", - "source_elasticsearch_cluster_id": mock.ValidClusterID, - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + SnapshotSource: &ElasticsearchSnapshotSource{ + SnapshotName: "__latest_success__", + SourceElasticsearchClusterId: mock.ValidClusterID, + }, + Topology: ElasticsearchTopologies{ + "hot_content": { + Size: ec.String("2g"), + ZoneCount: 1, + }, }, }, + template: 
testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1775,26 +1632,24 @@ func Test_expandEsResource(t *testing.T) { { name: "parse autodetect configuration strategy", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, - "strategy": []interface{}{map[string]interface{}{ - "type": "autodetect", - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + Size: ec.String("2g"), + ZoneCount: 1, + }, }, + Strategy: ec.String("autodetect"), }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1850,26 +1705,24 @@ func Test_expandEsResource(t *testing.T) { { name: "parse grow_and_shrink configuration strategy", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": 
"some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, - "strategy": []interface{}{map[string]interface{}{ - "type": "grow_and_shrink", - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + Size: ec.String("2g"), + ZoneCount: 1, + }, }, + Strategy: ec.String("grow_and_shrink"), }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -1925,26 +1778,24 @@ func Test_expandEsResource(t *testing.T) { { name: "parse rolling_grow_and_shrink configuration strategy", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, - "strategy": []interface{}{map[string]interface{}{ - "type": "rolling_grow_and_shrink", - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + Size: ec.String("2g"), + ZoneCount: 1, + }, }, + Strategy: ec.String("rolling_grow_and_shrink"), }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - - 
want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -2000,26 +1851,25 @@ func Test_expandEsResource(t *testing.T) { { name: "parse rolling configuration strategy", args: args{ - dt: tp770(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "zone_count": 1, - }}, - "strategy": []interface{}{map[string]interface{}{ - "type": "rolling_all", - }}, + es: Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + Topology: ElasticsearchTopologies{ + "hot_content": { + Size: ec.String("2g"), + ZoneCount: 1, + }, }, + Strategy: ec.String("rolling_all"), }, + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + templateID: "aws-io-optimized-v2", + version: "7.7.0", + useNodeRoles: false, }, - want: enrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ + want: EnrichWithEmptyTopologies(tp770(), &models.ElasticsearchPayload{ Region: ec.String("some-region"), RefID: ec.String("main-elasticsearch"), Settings: &models.ElasticsearchClusterSettings{ @@ -2077,16 +1927,56 @@ func Test_expandEsResource(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := expandEsResources(tt.args.ess, tt.args.dt) - if err != nil { - var msg string - if tt.err != nil { - msg = tt.err.Error() - } - assert.EqualError(t, err, msg) + var elasticsearch types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.es, ElasticsearchSchema().FrameworkType(), &elasticsearch) + assert.Nil(t, diags) + + 
got, diags := ElasticsearchPayload(context.Background(), elasticsearch, tt.args.template, tt.args.templateID, tt.args.version, tt.args.useNodeRoles, false) + if tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.Equal(t, tt.want, got) } + }) + } +} + +func Test_removeItemFromSlice(t *testing.T) { + type args struct { + slice []string + item string + } + + tests := []struct { + name string + args args + expected []string + }{ + { + name: "it should NOT modify slice if the slice doesn't contain item", + args: args{ + slice: []string{"second"}, + item: "first", + }, + expected: []string{"second"}, + }, + + { + name: "it should remove an item from slice if the slice contains it", + args: args{ + slice: []string{"first", "second"}, + item: "first", + }, + expected: []string{"second"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := removeItemFromSlice(tt.args.slice, tt.args.item) - assert.Equal(t, tt.want, got) + assert.Equal(t, tt.expected, got) }) } } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go new file mode 100644 index 000000000..37b8b2231 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read.go @@ -0,0 +1,134 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +type Elasticsearch struct { + Autoscale *bool `tfsdk:"autoscale"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + CloudID *string `tfsdk:"cloud_id"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology ElasticsearchTopologies `tfsdk:"topology"` + Config *ElasticsearchConfig `tfsdk:"config"` + RemoteCluster ElasticsearchRemoteClusters `tfsdk:"remote_cluster"` + SnapshotSource *ElasticsearchSnapshotSource `tfsdk:"snapshot_source"` + Extension ElasticsearchExtensions `tfsdk:"extension"` + TrustAccount ElasticsearchTrustAccounts `tfsdk:"trust_account"` + TrustExternal ElasticsearchTrustExternals `tfsdk:"trust_external"` + Strategy *string `tfsdk:"strategy"` +} + +func ReadElasticsearches(in []*models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) { + for _, model := range in { + if util.IsCurrentEsPlanEmpty(model) || IsElasticsearchStopped(model) { + continue + } + es, err := readElasticsearch(model, remotes) + if err != nil { + return nil, err + } + return es, nil + } + + return nil, nil +} + +func readElasticsearch(in *models.ElasticsearchResourceInfo, remotes *models.RemoteResources) (*Elasticsearch, error) { + var es Elasticsearch + + if util.IsCurrentEsPlanEmpty(in) || 
IsElasticsearchStopped(in) { + return &es, nil + } + + if in.Info.ClusterID != nil && *in.Info.ClusterID != "" { + es.ResourceId = in.Info.ClusterID + } + + if in.RefID != nil && *in.RefID != "" { + es.RefId = in.RefID + } + + if in.Region != nil { + es.Region = in.Region + } + + plan := in.Info.PlanInfo.Current.Plan + var err error + + topologies, err := readElasticsearchTopologies(plan) + if err != nil { + return nil, err + } + es.Topology = topologies + + if plan.AutoscalingEnabled != nil { + es.Autoscale = plan.AutoscalingEnabled + } + + if meta := in.Info.Metadata; meta != nil && meta.CloudID != "" { + es.CloudID = &meta.CloudID + } + + es.HttpEndpoint, es.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + es.Config, err = readElasticsearchConfig(plan.Elasticsearch) + if err != nil { + return nil, err + } + + clusters, err := readElasticsearchRemoteClusters(remotes.Resources) + if err != nil { + return nil, err + } + es.RemoteCluster = clusters + + extensions, err := readElasticsearchExtensions(plan.Elasticsearch) + if err != nil { + return nil, err + } + es.Extension = extensions + + accounts, err := readElasticsearchTrustAccounts(in.Info.Settings) + if err != nil { + return nil, err + } + es.TrustAccount = accounts + + externals, err := readElasticsearchTrustExternals(in.Info.Settings) + if err != nil { + return nil, err + } + es.TrustExternal = externals + + return &es, nil +} + +// IsElasticsearchStopped returns true if the resource is stopped. 
+func IsElasticsearchStopped(res *models.ElasticsearchResourceInfo) bool { + return res == nil || res.Info == nil || res.Info.Status == nil || + *res.Info.Status == "stopped" +} diff --git a/ec/ecresource/deploymentresource/elasticsearch_flatteners_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go similarity index 60% rename from ec/ecresource/deploymentresource/elasticsearch_flatteners_test.go rename to ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go index 33302f76c..d300a9c3e 100644 --- a/ec/ecresource/deploymentresource/elasticsearch_flatteners_test.go +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_read_test.go @@ -15,34 +15,36 @@ // specific language governing permissions and limitations // under the License. -package deploymentresource +package v2 import ( + "context" "testing" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" ) -func Test_flattenEsResource(t *testing.T) { +func Test_readElasticsearch(t *testing.T) { type args struct { in []*models.ElasticsearchResourceInfo - name string remotes models.RemoteResources } tests := []struct { name string args args - want []interface{} - err string + want *Elasticsearch }{ { name: "empty resource list returns empty list", args: args{in: []*models.ElasticsearchResourceInfo{}}, - want: []interface{}{}, + want: nil, }, { name: "empty current plan returns empty list", @@ -55,7 +57,7 @@ func Test_flattenEsResource(t *testing.T) { }, }, }}, - want: []interface{}{}, + want: nil, }, { name: "parses an elasticsearch resource", @@ -147,28 +149,26 @@ func Test_flattenEsResource(t *testing.T) { 
}, }, }}, - want: []interface{}{ - map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "cloud_id": "some CLOUD ID", - "http_endpoint": "http://somecluster.cloud.elastic.co:9200", - "https_endpoint": "https://somecluster.cloud.elastic.co:9243", - "config": func() []interface{} { return nil }(), - "topology": []interface{}{ - map[string]interface{}{ - "config": func() []interface{} { return nil }(), - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "size": "2g", - "size_resource": "memory", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "zone_count": int32(1), - }, + want: &Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + CloudID: ec.String("some CLOUD ID"), + HttpEndpoint: ec.String("http://somecluster.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://somecluster.cloud.elastic.co:9243"), + Config: &ElasticsearchConfig{}, + Topology: ElasticsearchTopologies{ + "hot_content": { + id: "hot_content", + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &ElasticsearchTopologyAutoscaling{}, }, }, }, @@ -226,56 +226,60 @@ func Test_flattenEsResource(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://othercluster.cloud.elastic.co:9200", - "https_endpoint": "https://othercluster.cloud.elastic.co:9243", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - 
"user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - "topology": []interface{}{map[string]interface{}{ - "config": func() []interface{} { return nil }(), - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "size": "2g", - "size_resource": "memory", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "zone_count": int32(1), - }}, - }}, + want: &Elasticsearch{ + RefId: ec.String("main-elasticsearch"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://othercluster.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://othercluster.cloud.elastic.co:9243"), + Config: &ElasticsearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + Topology: ElasticsearchTopologies{ + "hot_content": { + id: "hot_content", + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeMl: ec.String("false"), + ZoneCount: 1, + Autoscaling: &ElasticsearchTopologyAutoscaling{}, + }, + }, + }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := flattenEsResources(tt.args.in, tt.args.name, tt.args.remotes) - if err != nil && !assert.EqualError(t, err, tt.err) { - t.Error(err) - } + got, err := ReadElasticsearches(tt.args.in, &tt.args.remotes) + assert.Nil(t, err) assert.Equal(t, tt.want, got) + + var esObj types.Object + diags := tfsdk.ValueFrom(context.Background(), got, 
ElasticsearchSchema().FrameworkType(), &esObj) + assert.Nil(t, diags) }) } } -func Test_flattenEsTopology(t *testing.T) { +func Test_readElasticsearchTopology(t *testing.T) { type args struct { plan *models.ElasticsearchClusterPlan } tests := []struct { name string args args - want []interface{} + want ElasticsearchTopologies err string }{ { - name: "no zombie topologies", + name: "all topologies (even with 0 size) are returned", args: args{plan: &models.ElasticsearchClusterPlan{ ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ { @@ -301,17 +305,27 @@ func Test_flattenEsTopology(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "config": func() []interface{} { return nil }(), - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "size": "4g", - "size_resource": "memory", - "zone_count": int32(1), - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - }}, + want: ElasticsearchTopologies{ + "coordinating": { + id: "coordinating", + InstanceConfigurationId: ec.String("aws.coordinating.m5"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 2, + Autoscaling: &ElasticsearchTopologyAutoscaling{}, + }, + "hot_content": { + id: "hot_content", + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + Autoscaling: &ElasticsearchTopologyAutoscaling{}, + }, + }, }, { name: "includes unsized autoscaling topologies", @@ -347,32 +361,29 @@ func Test_flattenEsTopology(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "config": func() []interface{} { return nil }(), - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "size": "4g", - "size_resource": "memory", - "zone_count": int32(1), - "node_type_data": "true", - 
"node_type_ingest": "true", - "node_type_master": "true", + want: ElasticsearchTopologies{ + "hot_content": { + id: "hot_content", + InstanceConfigurationId: ec.String("aws.data.highio.i3"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeTypeData: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMaster: ec.String("true"), + Autoscaling: &ElasticsearchTopologyAutoscaling{}, }, - map[string]interface{}{ - "config": func() []interface{} { return nil }(), - "id": "ml", - "instance_configuration_id": "aws.ml.m5", - "size": "0g", - "size_resource": "memory", - "zone_count": int32(1), - "autoscaling": []interface{}{ - map[string]interface{}{ - "max_size": "8g", - "max_size_resource": "memory", - "min_size": "0g", - "min_size_resource": "memory", - }, + "ml": { + id: "ml", + InstanceConfigurationId: ec.String("aws.ml.m5"), + Size: ec.String("0g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Autoscaling: &ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("8g"), + MaxSizeResource: ec.String("memory"), + MinSize: ec.String("0g"), + MinSizeResource: ec.String("memory"), }, }, }, @@ -380,7 +391,7 @@ func Test_flattenEsTopology(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := flattenEsTopology(tt.args.plan) + got, err := readElasticsearchTopologies(tt.args.plan) if err != nil && !assert.EqualError(t, err, tt.err) { t.Error(err) } @@ -389,36 +400,65 @@ func Test_flattenEsTopology(t *testing.T) { } } -func Test_flattenEsConfig(t *testing.T) { +func Test_readElasticsearchConfig(t *testing.T) { type args struct { cfg *models.ElasticsearchConfiguration } tests := []struct { name string args args - want []interface{} + want *ElasticsearchConfig }{ { - name: "flattens plugins allowlist", + name: "read plugins allowlist", args: args{cfg: &models.ElasticsearchConfiguration{ EnabledBuiltInPlugins: []string{"some-allowed-plugin"}, }}, - want: 
[]interface{}{map[string]interface{}{ - "plugins": []interface{}{"some-allowed-plugin"}, - }}, + want: &ElasticsearchConfig{ + Plugins: []string{"some-allowed-plugin"}, + }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenEsConfig(tt.args.cfg) - for _, g := range got { - var rawVal []interface{} - m := g.(map[string]interface{}) - if v, ok := m["plugins"]; ok { - rawVal = v.(*schema.Set).List() - } - m["plugins"] = rawVal - } + got, err := readElasticsearchConfig(tt.args.cfg) + assert.Nil(t, err) + assert.Equal(t, tt.want, got) + + var config types.Object + diags := tfsdk.ValueFrom(context.Background(), got, elasticsearchConfigSchema().FrameworkType(), &config) + assert.Nil(t, diags) + }) + } +} + +func Test_IsEsResourceStopped(t *testing.T) { + type args struct { + res *models.ElasticsearchResourceInfo + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "started resource returns false", + args: args{res: &models.ElasticsearchResourceInfo{Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("started"), + }}}, + want: false, + }, + { + name: "stopped resource returns true", + args: args{res: &models.ElasticsearchResourceInfo{Info: &models.ElasticsearchClusterInfo{ + Status: ec.String("stopped"), + }}}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsElasticsearchStopped(tt.args.res) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_remote_cluster.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_remote_cluster.go new file mode 100644 index 000000000..a2c9720e7 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_remote_cluster.go @@ -0,0 +1,117 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchRemoteClusterTF struct { + DeploymentId types.String `tfsdk:"deployment_id"` + Alias types.String `tfsdk:"alias"` + RefId types.String `tfsdk:"ref_id"` + SkipUnavailable types.Bool `tfsdk:"skip_unavailable"` +} + +type ElasticsearchRemoteCluster struct { + DeploymentId *string `tfsdk:"deployment_id"` + Alias *string `tfsdk:"alias"` + RefId *string `tfsdk:"ref_id"` + SkipUnavailable *bool `tfsdk:"skip_unavailable"` +} + +type ElasticsearchRemoteClusters []ElasticsearchRemoteCluster + +func readElasticsearchRemoteClusters(in []*models.RemoteResourceRef) (ElasticsearchRemoteClusters, error) { + if len(in) == 0 { + return nil, nil + } + + clusters := make(ElasticsearchRemoteClusters, 0, len(in)) + + for _, model := range in { + cluster, err := readElasticsearchRemoteCluster(model) + if err != nil { + return nil, err + } + clusters = append(clusters, *cluster) + } + + return clusters, nil +} + +func ElasticsearchRemoteClustersPayload(ctx context.Context, clustersTF types.Set) 
(*models.RemoteResources, diag.Diagnostics) { + payloads := models.RemoteResources{Resources: []*models.RemoteResourceRef{}} + + for _, elem := range clustersTF.Elems { + var cluster ElasticsearchRemoteClusterTF + diags := tfsdk.ValueAs(ctx, elem, &cluster) + + if diags.HasError() { + return nil, diags + } + var payload models.RemoteResourceRef + + if !cluster.DeploymentId.IsNull() { + payload.DeploymentID = &cluster.DeploymentId.Value + } + + if !cluster.RefId.IsNull() { + payload.ElasticsearchRefID = &cluster.RefId.Value + } + + if !cluster.Alias.IsNull() { + payload.Alias = &cluster.Alias.Value + } + + if !cluster.SkipUnavailable.IsNull() { + payload.SkipUnavailable = &cluster.SkipUnavailable.Value + } + + payloads.Resources = append(payloads.Resources, &payload) + } + + return &payloads, nil +} + +func readElasticsearchRemoteCluster(in *models.RemoteResourceRef) (*ElasticsearchRemoteCluster, error) { + var cluster ElasticsearchRemoteCluster + + if in.DeploymentID != nil && *in.DeploymentID != "" { + cluster.DeploymentId = in.DeploymentID + } + + if in.ElasticsearchRefID != nil && *in.ElasticsearchRefID != "" { + cluster.RefId = in.ElasticsearchRefID + } + + if in.Alias != nil && *in.Alias != "" { + cluster.Alias = in.Alias + } + + if in.SkipUnavailable != nil { + cluster.SkipUnavailable = in.SkipUnavailable + } + + return &cluster, nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_snapshot_source.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_snapshot_source.go new file mode 100644 index 000000000..279979f98 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_snapshot_source.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +type ElasticsearchSnapshotSource v1.ElasticsearchSnapshotSource + +func elasticsearchSnapshotSourcePayload(ctx context.Context, srcObj attr.Value, payload *models.ElasticsearchClusterPlan) diag.Diagnostics { + var snapshot *v1.ElasticsearchSnapshotSourceTF + + if srcObj.IsNull() || srcObj.IsUnknown() { + return nil + } + + if diags := tfsdk.ValueAs(ctx, srcObj, &snapshot); diags.HasError() { + return diags + } + + if snapshot == nil { + return nil + } + + if payload.Transient == nil { + payload.Transient = &models.TransientElasticsearchPlanConfiguration{ + RestoreSnapshot: &models.RestoreSnapshotConfiguration{}, + } + } + + if !snapshot.SourceElasticsearchClusterId.IsNull() { + payload.Transient.RestoreSnapshot.SourceClusterID = snapshot.SourceElasticsearchClusterId.Value + } + + if !snapshot.SnapshotName.IsNull() { + payload.Transient.RestoreSnapshot.SnapshotName = &snapshot.SnapshotName.Value + } + + return nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go 
b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go new file mode 100644 index 000000000..c007c4823 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_test_utils.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import "github.com/elastic/cloud-sdk-go/pkg/models" + +func CreateTierForTest(tierId string, tier ElasticsearchTopology) *ElasticsearchTopology { + res := tier + res.id = tierId + return &res +} + +func EnrichWithEmptyTopologies(tpl, want *models.ElasticsearchPayload) *models.ElasticsearchPayload { + tpl.DisplayName = want.DisplayName + tpl.RefID = want.RefID + tpl.Region = want.Region + tpl.Settings = want.Settings + tpl.Plan.AutoscalingEnabled = want.Plan.AutoscalingEnabled + tpl.Plan.Elasticsearch = want.Plan.Elasticsearch + tpl.Plan.Transient = want.Plan.Transient + + for i, t := range tpl.Plan.ClusterTopology { + for _, w := range want.Plan.ClusterTopology { + if t.ID == w.ID { + tpl.Plan.ClusterTopology[i] = w + } + } + } + + return tpl +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go new file mode 100644 index 000000000..29c7abd6b --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_topology.go @@ -0,0 +1,362 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "encoding/json" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deploymentsize" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchTopologyTF struct { + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + NodeTypeData types.String `tfsdk:"node_type_data"` + NodeTypeMaster types.String `tfsdk:"node_type_master"` + NodeTypeIngest types.String `tfsdk:"node_type_ingest"` + NodeTypeMl types.String `tfsdk:"node_type_ml"` + NodeRoles types.Set `tfsdk:"node_roles"` + Autoscaling types.Object `tfsdk:"autoscaling"` +} + +type ElasticsearchTopology struct { + id string + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + NodeTypeData *string `tfsdk:"node_type_data"` + NodeTypeMaster *string `tfsdk:"node_type_master"` + NodeTypeIngest *string `tfsdk:"node_type_ingest"` + NodeTypeMl *string `tfsdk:"node_type_ml"` + NodeRoles []string `tfsdk:"node_roles"` + Autoscaling *ElasticsearchTopologyAutoscaling `tfsdk:"autoscaling"` +} + +type ElasticsearchTopologyAutoscaling v1.ElasticsearchTopologyAutoscaling + +type ElasticsearchTopologiesTF map[string]ElasticsearchTopologyTF + +func (topology 
ElasticsearchTopologyTF) payload(ctx context.Context, topologyID string, planTopologies []*models.ElasticsearchClusterTopologyElement) diag.Diagnostics { + var diags diag.Diagnostics + + topologyElem, err := matchEsTopologyID(topologyID, planTopologies) + if err != nil { + diags.AddError("topology matching error", err.Error()) + return diags + } + + size, err := converters.ParseTopologySizeTypes(topology.Size, topology.SizeResource) + if err != nil { + diags.AddError("size parsing error", err.Error()) + } + + if size != nil { + topologyElem.Size = size + } + + if topology.ZoneCount.Value > 0 { + topologyElem.ZoneCount = int32(topology.ZoneCount.Value) + } + + if err := topology.parseLegacyNodeType(topologyElem.NodeType); err != nil { + diags.AddError("topology legacy node type error", err.Error()) + } + + var nodeRoles []string + ds := topology.NodeRoles.ElementsAs(ctx, &nodeRoles, true) + diags.Append(ds...) + + if !ds.HasError() && len(nodeRoles) > 0 { + topologyElem.NodeRoles = nodeRoles + topologyElem.NodeType = nil + } + + diags.Append(elasticsearchTopologyAutoscalingPayload(ctx, topology.Autoscaling, topologyID, topologyElem)...) + + diags = append(diags, ds...) 
+ + return diags +} + +func readElasticsearchTopologies(in *models.ElasticsearchClusterPlan) (ElasticsearchTopologies, error) { + if len(in.ClusterTopology) == 0 { + return nil, nil + } + + topology := make(map[string]ElasticsearchTopology, len(in.ClusterTopology)) + + for _, model := range in.ClusterTopology { + tier, err := readElasticsearchTopology(model) + if err != nil { + return nil, err + } + if tier.id != "" { + topology[tier.id] = *tier + } + } + + return topology, nil +} + +func readElasticsearchTopology(model *models.ElasticsearchClusterTopologyElement) (*ElasticsearchTopology, error) { + var topology ElasticsearchTopology + + topology.id = model.ID + + if model.InstanceConfigurationID != "" { + topology.InstanceConfigurationId = &model.InstanceConfigurationID + } + + if model.Size != nil { + topology.Size = ec.String(util.MemoryToState(*model.Size.Value)) + topology.SizeResource = model.Size.Resource + } + + topology.ZoneCount = int(model.ZoneCount) + + if nt := model.NodeType; nt != nil { + if nt.Data != nil { + topology.NodeTypeData = ec.String(strconv.FormatBool(*nt.Data)) + } + + if nt.Ingest != nil { + topology.NodeTypeIngest = ec.String(strconv.FormatBool(*nt.Ingest)) + } + + if nt.Master != nil { + topology.NodeTypeMaster = ec.String(strconv.FormatBool(*nt.Master)) + } + + if nt.Ml != nil { + topology.NodeTypeMl = ec.String(strconv.FormatBool(*nt.Ml)) + } + } + + topology.NodeRoles = model.NodeRoles + + autoscaling, err := readElasticsearchTopologyAutoscaling(model) + if err != nil { + return nil, err + } + topology.Autoscaling = autoscaling + + return &topology, nil +} + +func readElasticsearchTopologyAutoscaling(topology *models.ElasticsearchClusterTopologyElement) (*ElasticsearchTopologyAutoscaling, error) { + var a ElasticsearchTopologyAutoscaling + + if max := topology.AutoscalingMax; max != nil { + a.MaxSizeResource = max.Resource + a.MaxSize = ec.String(util.MemoryToState(*max.Value)) + } + + if min := topology.AutoscalingMin; min != nil { 
+ a.MinSizeResource = min.Resource + a.MinSize = ec.String(util.MemoryToState(*min.Value)) + } + + if topology.AutoscalingPolicyOverrideJSON != nil { + b, err := json.Marshal(topology.AutoscalingPolicyOverrideJSON) + if err != nil { + return nil, fmt.Errorf("elasticsearch topology %s: unable to persist policy_override_json - %w", topology.ID, err) + } + a.PolicyOverrideJson = ec.String(string(b)) + } + + return &a, nil +} + +func (topology *ElasticsearchTopologyTF) parseLegacyNodeType(nodeType *models.ElasticsearchNodeType) error { + if nodeType == nil { + return nil + } + + if topology.NodeTypeData.Value != "" { + nt, err := strconv.ParseBool(topology.NodeTypeData.Value) + if err != nil { + return fmt.Errorf("failed parsing node_type_data value: %w", err) + } + nodeType.Data = &nt + } + + if topology.NodeTypeMaster.Value != "" { + nt, err := strconv.ParseBool(topology.NodeTypeMaster.Value) + if err != nil { + return fmt.Errorf("failed parsing node_type_master value: %w", err) + } + nodeType.Master = &nt + } + + if topology.NodeTypeIngest.Value != "" { + nt, err := strconv.ParseBool(topology.NodeTypeIngest.Value) + if err != nil { + return fmt.Errorf("failed parsing node_type_ingest value: %w", err) + } + nodeType.Ingest = &nt + } + + if topology.NodeTypeMl.Value != "" { + nt, err := strconv.ParseBool(topology.NodeTypeMl.Value) + if err != nil { + return fmt.Errorf("failed parsing node_type_ml value: %w", err) + } + nodeType.Ml = &nt + } + + return nil +} + +func (topology *ElasticsearchTopologyTF) HasNodeType() bool { + for _, nodeType := range []types.String{topology.NodeTypeData, topology.NodeTypeIngest, topology.NodeTypeMaster, topology.NodeTypeMl} { + if !nodeType.IsUnknown() && !nodeType.IsNull() && nodeType.Value != "" { + return true + } + } + return false +} + +type ElasticsearchTopologies map[string]ElasticsearchTopology + +func matchEsTopologyID(id string, topologies []*models.ElasticsearchClusterTopologyElement) 
(*models.ElasticsearchClusterTopologyElement, error) { + for _, t := range topologies { + if t.ID == id { + return t, nil + } + } + + topIDs := topologyIDs(topologies) + for i, id := range topIDs { + topIDs[i] = "\"" + id + "\"" + } + + return nil, fmt.Errorf(`invalid id ('%s'): valid topology IDs are %s`, id, strings.Join(topIDs, ", ")) +} + +func topologyIDs(topologies []*models.ElasticsearchClusterTopologyElement) []string { + var result []string + + for _, topology := range topologies { + result = append(result, topology.ID) + } + + if len(result) == 0 { + return nil + } + return result +} + +func elasticsearchTopologyAutoscalingPayload(ctx context.Context, autoObj attr.Value, topologyID string, payload *models.ElasticsearchClusterTopologyElement) diag.Diagnostics { + var diag diag.Diagnostics + + if autoObj.IsNull() || autoObj.IsUnknown() { + return nil + } + + // it should be only one element if any + var autoscale v1.ElasticsearchTopologyAutoscalingTF + + if diags := tfsdk.ValueAs(ctx, autoObj, &autoscale); diags.HasError() { + return diags + } + + if autoscale == (v1.ElasticsearchTopologyAutoscalingTF{}) { + return nil + } + + if !autoscale.MinSize.IsNull() && !autoscale.MinSize.IsUnknown() { + if payload.AutoscalingMin == nil { + payload.AutoscalingMin = new(models.TopologySize) + } + + err := expandAutoscalingDimension(autoscale, payload.AutoscalingMin, autoscale.MinSize, autoscale.MinSizeResource) + if err != nil { + diag.AddError("fail to parse autoscale min size", err.Error()) + return diag + } + + if reflect.DeepEqual(payload.AutoscalingMin, new(models.TopologySize)) { + payload.AutoscalingMin = nil + } + } + + if !autoscale.MaxSize.IsNull() && !autoscale.MaxSize.IsUnknown() { + if payload.AutoscalingMax == nil { + payload.AutoscalingMax = new(models.TopologySize) + } + + err := expandAutoscalingDimension(autoscale, payload.AutoscalingMax, autoscale.MaxSize, autoscale.MaxSizeResource) + if err != nil { + diag.AddError("fail to parse autoscale max 
size", err.Error()) + return diag + } + + if reflect.DeepEqual(payload.AutoscalingMax, new(models.TopologySize)) { + payload.AutoscalingMax = nil + } + } + + if autoscale.PolicyOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(autoscale.PolicyOverrideJson.Value), + &payload.AutoscalingPolicyOverrideJSON, + ); err != nil { + diag.AddError(fmt.Sprintf("elasticsearch topology %s: unable to load policy_override_json", topologyID), err.Error()) + return diag + } + } + + return diag +} + +// expandAutoscalingDimension centralises processing of %_size and %_size_resource attributes +func expandAutoscalingDimension(autoscale v1.ElasticsearchTopologyAutoscalingTF, model *models.TopologySize, size, sizeResource types.String) error { + if size.Value != "" { + val, err := deploymentsize.ParseGb(size.Value) + if err != nil { + return err + } + model.Value = &val + + if model.Resource == nil { + model.Resource = ec.String("memory") + } + } + + if sizeResource.Value != "" { + model.Resource = &sizeResource.Value + } + + return nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_account.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_account.go new file mode 100644 index 000000000..9f843f5fa --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_account.go @@ -0,0 +1,59 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" +) + +type ElasticsearchTrustAccounts v1.ElasticsearchTrustAccounts + +func readElasticsearchTrustAccounts(in *models.ElasticsearchClusterSettings) (ElasticsearchTrustAccounts, error) { + if in == nil || in.Trust == nil { + return nil, nil + } + + accounts := make(ElasticsearchTrustAccounts, 0, len(in.Trust.Accounts)) + + for _, model := range in.Trust.Accounts { + account, err := readElasticsearchTrustAccount(model) + if err != nil { + return nil, err + } + accounts = append(accounts, *account) + } + + return accounts, nil +} + +func readElasticsearchTrustAccount(in *models.AccountTrustRelationship) (*v1.ElasticsearchTrustAccount, error) { + var acc v1.ElasticsearchTrustAccount + + if in.AccountID != nil { + acc.AccountId = in.AccountID + } + + if in.TrustAll != nil { + acc.TrustAll = in.TrustAll + } + + acc.TrustAllowlist = in.TrustAllowlist + + return &acc, nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_external.go b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_external.go new file mode 100644 index 000000000..0aa2e3731 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/elasticsearch_trust_external.go @@ -0,0 +1,168 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ElasticsearchTrustExternals v1.ElasticsearchTrustExternals + +func readElasticsearchTrustExternals(in *models.ElasticsearchClusterSettings) (ElasticsearchTrustExternals, error) { + if in == nil || in.Trust == nil { + return nil, nil + } + + externals := make(ElasticsearchTrustExternals, 0, len(in.Trust.External)) + + for _, model := range in.Trust.External { + external, err := readElasticsearchTrustExternal(model) + if err != nil { + return nil, err + } + externals = append(externals, *external) + } + + return externals, nil +} + +func elasticsearchTrustExternalPayload(ctx context.Context, externals types.Set, model *models.ElasticsearchClusterSettings) (*models.ElasticsearchClusterSettings, diag.Diagnostics) { + var diags diag.Diagnostics + + payloads := make([]*models.ExternalTrustRelationship, 0, len(externals.Elems)) + + for _, elem := range externals.Elems { + var external 
v1.ElasticsearchTrustExternalTF + + ds := tfsdk.ValueAs(ctx, elem, &external) + + diags = append(diags, ds...) + + if diags.HasError() { + continue + } + + id := external.RelationshipId.Value + all := external.TrustAll.Value + + payload := &models.ExternalTrustRelationship{ + TrustRelationshipID: &id, + TrustAll: &all, + } + + ds = external.TrustAllowlist.ElementsAs(ctx, &payload.TrustAllowlist, true) + + diags = append(diags, ds...) + + if ds.HasError() { + continue + } + + payloads = append(payloads, payload) + } + + if len(payloads) == 0 { + return model, nil + } + + if model == nil { + model = &models.ElasticsearchClusterSettings{} + } + + if model.Trust == nil { + model.Trust = &models.ElasticsearchClusterTrustSettings{} + } + + model.Trust.External = append(model.Trust.External, payloads...) + + return model, nil +} + +func readElasticsearchTrustExternal(in *models.ExternalTrustRelationship) (*v1.ElasticsearchTrustExternal, error) { + var ext v1.ElasticsearchTrustExternal + + if in.TrustRelationshipID != nil { + ext.RelationshipId = in.TrustRelationshipID + } + + if in.TrustAll != nil { + ext.TrustAll = in.TrustAll + } + + ext.TrustAllowlist = in.TrustAllowlist + + return &ext, nil +} + +func elasticsearchTrustAccountPayload(ctx context.Context, accounts types.Set, model *models.ElasticsearchClusterSettings) (*models.ElasticsearchClusterSettings, diag.Diagnostics) { + var diags diag.Diagnostics + + payloads := make([]*models.AccountTrustRelationship, 0, len(accounts.Elems)) + + for _, elem := range accounts.Elems { + var account v1.ElasticsearchTrustAccountTF + + ds := tfsdk.ValueAs(ctx, elem, &account) + + diags = append(diags, ds...) + + if ds.HasError() { + continue + } + + id := account.AccountId.Value + all := account.TrustAll.Value + + payload := &models.AccountTrustRelationship{ + AccountID: &id, + TrustAll: &all, + } + + ds = account.TrustAllowlist.ElementsAs(ctx, &payload.TrustAllowlist, true) + + diags = append(diags, ds...) 
+ + if ds.HasError() { + continue + } + + payloads = append(payloads, payload) + } + + if len(payloads) == 0 { + return model, nil + } + + if model == nil { + model = &models.ElasticsearchClusterSettings{} + } + + if model.Trust == nil { + model.Trust = &models.ElasticsearchClusterTrustSettings{} + } + + model.Trust.Accounts = append(model.Trust.Accounts, payloads...) + + return model, nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go new file mode 100644 index 000000000..fd2b5dd1f --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles.go @@ -0,0 +1,203 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "fmt" + + "github.com/blang/semver" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func CompatibleWithNodeRoles(version string) (bool, error) { + deploymentVersion, err := semver.Parse(version) + if err != nil { + return false, fmt.Errorf("failed to parse Elasticsearch version: %w", err) + } + + return deploymentVersion.GE(utils.DataTiersVersion), nil +} + +func UseNodeRoles(ctx context.Context, stateVersion, planVersion types.String, planElasticsearch types.Object) (bool, diag.Diagnostics) { + compatibleWithNodeRoles, err := CompatibleWithNodeRoles(planVersion.Value) + + if err != nil { + var diags diag.Diagnostics + diags.AddError("Failed to determine whether to use node_roles", err.Error()) + return false, diags + } + + if !compatibleWithNodeRoles { + return false, nil + } + + convertLegacy, diags := legacyToNodeRoles(ctx, stateVersion, planVersion, planElasticsearch) + + if diags.HasError() { + return false, diags + } + + return convertLegacy, nil +} + +// legacyToNodeRoles returns true when the legacy "node_type_*" should be +// migrated over to node_roles. Which will be true when: +// * The version field doesn't change. +// * The version field changes but: +// - The Elasticsearch.0.topology doesn't have any node_type_* set.
+func legacyToNodeRoles(ctx context.Context, stateVersion, planVersion types.String, planElasticsearch types.Object) (bool, diag.Diagnostics) { + if stateVersion.Value == "" || stateVersion.Value == planVersion.Value { + return true, nil + } + + var diags diag.Diagnostics + oldVersion, err := semver.Parse(stateVersion.Value) + if err != nil { + diags.AddError("Failed to parse previous Elasticsearch version", err.Error()) + return false, diags + } + newVersion, err := semver.Parse(planVersion.Value) + if err != nil { + diags.AddError("Failed to parse new Elasticsearch version", err.Error()) + return false, diags + } + + // if the version change moves from non-node_roles to one + // that supports node roles, do not migrate on that step. + if oldVersion.LT(utils.DataTiersVersion) && newVersion.GE(utils.DataTiersVersion) { + return false, nil + } + + // When any topology elements in the state have the node_type_* + // properties set, the node_role field cannot be used, since + // we'd be changing the version AND migrating over `node_role`s + // which is not permitted by the API. 
+ + var es *ElasticsearchTF + + if diags := tfsdk.ValueAs(ctx, planElasticsearch, &es); diags.HasError() { + return false, diags + } + + if es == nil { + diags.AddError("Cannot migrate node types to node roles", "cannot find elasticsearch object") + return false, diags + } + + tiers, diags := es.topologies(ctx) + + if diags.HasError() { + return false, diags + } + + for _, tier := range tiers { + if tier.HasNodeType() { + return false, nil + } + } + + return true, nil +} + +func useStateAndNodeRolesInPlanModifiers(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) (useState, useNodeRoles bool) { + if req.AttributeState == nil || resp.AttributePlan == nil || req.AttributeConfig == nil { + return false, false + } + + if !resp.AttributePlan.IsUnknown() { + return false, false + } + + // if the config is the unknown value, use the unknown value otherwise, interpolation gets messed up + // it's the precaution taken from the Framework's `UseStateForUnknown` plan modifier + if req.AttributeConfig.IsUnknown() { + return false, false + } + + // if there is no state for "version" return + var stateVersion types.String + + if diags := req.State.GetAttribute(ctx, path.Root("version"), &stateVersion); diags.HasError() { + resp.Diagnostics.Append(diags...) + return false, false + } + + if stateVersion.IsNull() { + return false, false + } + + // if template changed return + templateChanged, diags := attributeChanged(ctx, path.Root("deployment_template_id"), req) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return false, false + } + + if templateChanged { + return false, false + } + + // get version for plan and state and calculate useNodeRoles + + var planVersion types.String + + if diags := req.Plan.GetAttribute(ctx, path.Root("version"), &planVersion); diags.HasError() { + resp.Diagnostics.Append(diags...) 
+ return false, false + } + + var elasticsearch types.Object + + if diags := req.Plan.GetAttribute(ctx, path.Root("elasticsearch"), &elasticsearch); diags.HasError() { + resp.Diagnostics.Append(diags...) + return false, false + } + + useNodeRoles, diags = UseNodeRoles(ctx, stateVersion, planVersion, elasticsearch) + + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return false, false + } + + return true, useNodeRoles +} + +func attributeChanged(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { + var planValue attr.Value + + if diags := req.Plan.GetAttribute(ctx, p, &planValue); diags.HasError() { + return false, diags + } + + var stateValue attr.Value + + if diags := req.State.GetAttribute(ctx, p, &stateValue); diags.HasError() { + return false, diags + } + + return !planValue.Equal(stateValue), nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go new file mode 100644 index 000000000..930069ea2 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier.go @@ -0,0 +1,70 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +func UseNodeRolesDefault() tfsdk.AttributePlanModifier { + return nodeRolesDefault{} +} + +type nodeRolesDefault struct{} + +func (m nodeRolesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { + useState, useNodeRoles := useStateAndNodeRolesInPlanModifiers(ctx, req, resp) + + if resp.Diagnostics.HasError() { + return + } + + if !useState { + return + } + + // If useNodeRoles is false, we can use the current state and + // it should be null in this case - we don't migrate back from node_roles to node_types + if !useNodeRoles && !req.AttributeState.IsNull() { + // it should not happen + return + } + + // If useNodeRoles is true, then there is either + // * state already uses node_roles or + // * state uses node_types but we need to migrate to node_roles. + // We cannot use state in the second case (migration to node_roles) + // It happens when the attribute state is null. + if useNodeRoles && req.AttributeState.IsNull() { + return + } + + resp.AttributePlan = req.AttributeState +} + +// Description returns a human-readable description of the plan modifier. +func (r nodeRolesDefault) Description(ctx context.Context) string { + return "Use current state if it's still valid." +} + +// MarkdownDescription returns a markdown description of the plan modifier. +func (r nodeRolesDefault) MarkdownDescription(ctx context.Context) string { + return "Use current state if it's still valid." 
+} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go new file mode 100644 index 000000000..2224edb26 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_plan_modifier_test.go @@ -0,0 +1,207 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2_test + +/* +func Test_nodeRolesPlanModifier(t *testing.T) { + type args struct { + attributeState []string + attributePlan []string + deploymentState *deploymentv2.Deployment + deploymentPlan deploymentv2.Deployment + } + tests := []struct { + name string + args args + expected []string + expectedUnknown bool + }{ + { + name: "it should keep current plan value if it's defined", + args: args{ + attributePlan: []string{ + "data_content", + "data_hot", + "ingest", + "master", + }, + }, + expected: []string{ + "data_content", + "data_hot", + "ingest", + "master", + }, + }, + + { + name: "it should not use state if state doesn't have `version`", + args: args{}, + expectedUnknown: true, + }, + + { + name: "it should not use state if plan changed deployment template`", + args: args{ + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-storage-optimized-v3", + }, + }, + expectedUnknown: true, + }, + + { + name: "it should not use state if plan version is less than 7.10.0 but the attribute state is not null`", + args: args{ + attributeState: []string{"data_hot"}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.9.0", + }, + }, + expectedUnknown: true, + }, + + { + name: "it should not use state if plan version is changed over 7.10.0 and the attribute state is not null`", + args: args{ + attributeState: []string{"data_hot"}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.9.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.1", + }, + }, + expectedUnknown: true, + }, + + { + name: "it should use state if plan version is changed over 7.10.0 and the attribute state is null`", + 
args: args{ + attributeState: nil, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.9.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.1", + }, + }, + expected: nil, + }, + + { + name: "it should use state if both plan and state versions is or higher than 7.10.0 and the attribute state is not null`", + args: args{ + attributeState: []string{"data_hot"}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + }, + expected: []string{"data_hot"}, + }, + + { + name: "it should not use state if both plan and state versions is or higher than 7.10.0 and the attribute state is null`", + args: args{ + attributeState: nil, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + }, + expectedUnknown: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + modifier := v2.UseNodeRolesDefault() + + // attributeConfig value is not used in the plan modifer + // it just should be known + attributeConfigValue := attrValueFromGoTypeValue(t, []string{}, types.SetType{ElemType: types.StringType}) + + attributeStateValue := attrValueFromGoTypeValue(t, tt.args.attributeState, types.SetType{ElemType: types.StringType}) + + deploymentStateValue := tftypesValueFromGoTypeValue(t, tt.args.deploymentState, deploymentv2.DeploymentSchema().Type()) + + deploymentPlanValue := tftypesValueFromGoTypeValue(t, tt.args.deploymentPlan, deploymentv2.DeploymentSchema().Type()) + + req := tfsdk.ModifyAttributePlanRequest{ + AttributeConfig: attributeConfigValue, + AttributeState: attributeStateValue, 
+ State: tfsdk.State{ + Raw: deploymentStateValue, + Schema: deploymentv2.DeploymentSchema(), + }, + Plan: tfsdk.Plan{ + Raw: deploymentPlanValue, + Schema: deploymentv2.DeploymentSchema(), + }, + } + + // the default plan value is `Unknown` ("known after apply") + // the plan modifier either keeps this value or uses the current state + // if test doesn't specify plan value, let's use the default (`Unknown`) value that is used by TF during plan modifier execution + attributePlanValue := unknownValueFromAttrType(t, types.SetType{ElemType: types.StringType}) + if tt.args.attributePlan != nil { + attributePlanValue = attrValueFromGoTypeValue(t, tt.args.attributePlan, types.SetType{ElemType: types.StringType}) + } + + resp := tfsdk.ModifyAttributePlanResponse{AttributePlan: attributePlanValue} + + modifier.Modify(context.Background(), req, &resp) + + assert.Nil(t, resp.Diagnostics) + + if tt.expectedUnknown { + assert.True(t, resp.AttributePlan.IsUnknown(), "attributePlan should be unknown") + return + } + + var attributePlan []string + + diags := tfsdk.ValueAs(context.Background(), resp.AttributePlan, &attributePlan) + + assert.Nil(t, diags) + + assert.Equal(t, tt.expected, attributePlan) + }) + } +} +*/ diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go new file mode 100644 index 000000000..fbfd7efc8 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_roles_test.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" +) + +func Test_UseNodeRoles(t *testing.T) { + type args struct { + stateVersion string + planVersion string + elasticsearch Elasticsearch + } + tests := []struct { + name string + args args + expected bool + expectedDiags diag.Diagnostics + }{ + + { + name: "it should fail when plan version is invalid", + args: args{ + stateVersion: "7.0.0", + planVersion: "invalid_plan_version", + }, + expected: true, + expectedDiags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Failed to determine whether to use node_roles", "failed to parse Elasticsearch version: No Major.Minor.Patch elements found") + return diags + }(), + }, + + { + name: "it should fail when state version is invalid", + args: args{ + stateVersion: "invalid.state.version", + planVersion: "7.10.0", + }, + expected: true, + expectedDiags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Failed to parse previous Elasticsearch version", `Invalid character(s) found in major number "invalid"`) + return diags + }(), + }, + + { + name: "it should instruct to use node_types if both version are prior to 7.10.0", + args: args{ + stateVersion: "7.9.0", + planVersion: "7.9.1", + }, + expected: false, + }, + + { + name: "it should instruct to 
use node_types if plan version is 7.10.0 and state version is prior to 7.10.0", + args: args{ + stateVersion: "7.9.0", + planVersion: "7.10.0", + }, + expected: false, + }, + + { + name: "it should instruct to use node_types if plan version is after 7.10.0 and state version is prior to 7.10.0", + args: args{ + stateVersion: "7.9.2", + planVersion: "7.10.1", + }, + expected: false, + }, + + { + name: "it should instruct to use node_types if plan version is after 7.10.0 and state version is prior to 7.10.0", + args: args{ + stateVersion: "7.9.2", + planVersion: "7.10.1", + }, + expected: false, + }, + + { + name: "it should instruct to use node_roles if plan version is equal to state version and both is 7.10.0", + args: args{ + stateVersion: "7.10.0", + planVersion: "7.10.0", + }, + expected: true, + }, + + { + name: "it should instruct to use node_roles if plan version is equal to state version and both is after 7.10.0", + args: args{ + stateVersion: "7.10.2", + planVersion: "7.10.2", + }, + expected: true, + }, + + { + name: "it should instruct to use node_types if both plan version and state version are after 7.10.0 and plan uses node_types", + args: args{ + stateVersion: "7.11.1", + planVersion: "7.12.0", + elasticsearch: Elasticsearch{ + Topology: ElasticsearchTopologies{ + "hot_content": { + NodeTypeData: ec.String("true"), + NodeTypeMaster: ec.String("true"), + NodeTypeIngest: ec.String("true"), + NodeTypeMl: ec.String("false"), + }, + }, + }, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var elasticsearchObject types.Object + + diags := tfsdk.ValueFrom(context.Background(), tt.args.elasticsearch, ElasticsearchSchema().FrameworkType(), &elasticsearchObject) + + assert.Nil(t, diags) + + got, diags := UseNodeRoles(context.Background(), types.String{Value: tt.args.stateVersion}, types.String{Value: tt.args.planVersion}, elasticsearchObject) + + if tt.expectedDiags == nil { + assert.Nil(t, diags) + 
assert.Equal(t, tt.expected, got) + } else { + assert.Equal(t, tt.expectedDiags, diags) + } + + }) + } +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go new file mode 100644 index 000000000..31eba0308 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier.go @@ -0,0 +1,68 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +func UseNodeTypesDefault() tfsdk.AttributePlanModifier { + return nodeTypesDefault{} +} + +type nodeTypesDefault struct{} + +func (m nodeTypesDefault) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { + useState, useNodeRoles := useStateAndNodeRolesInPlanModifiers(ctx, req, resp) + + if resp.Diagnostics.HasError() { + return + } + + if !useState { + return + } + + // If useNodeRoles is false, we can use the current state if it's not null + if !useNodeRoles && req.AttributeState.IsNull() { + return + } + + // If useNodeRoles is true, then there is either + // * state already uses node_roles or + // * state uses node_types but we need to migrate to node_roles. + // We cannot use state in the second case (migration to node_roles) + // It happens when the attribute state is not null. + if useNodeRoles && !req.AttributeState.IsNull() { + return + } + + resp.AttributePlan = req.AttributeState +} + +// Description returns a human-readable description of the plan modifier. +func (r nodeTypesDefault) Description(ctx context.Context) string { + return "Use current state if it's still valid." +} + +// MarkdownDescription returns a markdown description of the plan modifier. +func (r nodeTypesDefault) MarkdownDescription(ctx context.Context) string { + return "Use current state if it's still valid." +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go new file mode 100644 index 000000000..a73f5fc8d --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/node_types_plan_modifier_test.go @@ -0,0 +1,177 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2_test + +/* +func Test_nodeTypesPlanModifier(t *testing.T) { + type args struct { + attributeState types.String + attributePlan *types.String + deploymentState *deploymentv2.Deployment + deploymentPlan deploymentv2.Deployment + } + tests := []struct { + name string + args args + expected types.String + }{ + { + name: "it should keep current plan value if it's defined", + args: args{ + attributePlan: &types.String{Value: "some value"}, + }, + expected: types.String{Value: "some value"}, + }, + + { + name: "it should not use state if state doesn't have `version`", + args: args{}, + expected: types.String{Unknown: true}, + }, + + { + name: "it should not use state if plan changed deployment template`", + args: args{ + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-storage-optimized-v3", + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should not use state if plan version is less than 7.10.0 but the attribute state is null`", + args: args{ + attributeState: types.String{Null: true}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + }, + deploymentPlan: 
deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.9.0", + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should not use state if plan version is changed over 7.10.0, but the attribute state is null`", + args: args{ + attributeState: types.String{Null: true}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.9.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.1", + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should not use state if both plan and state versions is or higher than 7.10.0, but the attribute state is not null`", + args: args{ + attributeState: types.String{Value: "false"}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should use state if both plan and state versions is or higher than 7.10.0 and the attribute state is null`", + args: args{ + attributeState: types.String{Null: true}, + deploymentState: &deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Version: "7.10.0", + }, + }, + expected: types.String{Null: true}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + modifier := v2.UseNodeTypesDefault() + + // attributeConfig value is not used in the plan modifer + // it just should be known + attributeConfigValue := attrValueFromGoTypeValue(t, types.String{}, types.StringType) + + attributeStateValue := attrValueFromGoTypeValue(t, tt.args.attributeState, types.StringType) + + deploymentStateValue := tftypesValueFromGoTypeValue(t, 
tt.args.deploymentState, deploymentv2.DeploymentSchema().Type()) + + deploymentPlanValue := tftypesValueFromGoTypeValue(t, tt.args.deploymentPlan, deploymentv2.DeploymentSchema().Type()) + + req := tfsdk.ModifyAttributePlanRequest{ + AttributeConfig: attributeConfigValue, + AttributeState: attributeStateValue, + State: tfsdk.State{ + Raw: deploymentStateValue, + Schema: deploymentv2.DeploymentSchema(), + }, + Plan: tfsdk.Plan{ + Raw: deploymentPlanValue, + Schema: deploymentv2.DeploymentSchema(), + }, + } + + // the default plan value is `Unknown` ("known after apply") + // the plan modifier either keeps this value or uses the current state + // if test doesn't specify plan value, let's use the default (`Unknown`) value that is used by TF during plan modifier execution + + if tt.args.attributePlan == nil { + tt.args.attributePlan = &types.String{Unknown: true} + } + + attributePlanValue := attrValueFromGoTypeValue(t, tt.args.attributePlan, types.StringType) + + resp := tfsdk.ModifyAttributePlanResponse{AttributePlan: attributePlanValue} + + modifier.Modify(context.Background(), req, &resp) + + assert.Nil(t, resp.Diagnostics) + + var attributePlan types.String + + diags := tfsdk.ValueAs(context.Background(), resp.AttributePlan, &attributePlan) + + assert.Nil(t, diags) + + assert.Equal(t, tt.expected, attributePlan) + }) + } +} +*/ diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go new file mode 100644 index 000000000..c634459b1 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/schema.go @@ -0,0 +1,461 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "strings" + + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/stringvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +// These constants are only used to determine whether or not a dedicated +// tier of masters or ingest (coordinating) nodes are set. +const ( + dataTierRolePrefix = "data_" + ingestDataTierRole = "ingest" + masterDataTierRole = "master" + autodetect = "autodetect" + growAndShrink = "grow_and_shrink" + rollingGrowAndShrink = "rolling_grow_and_shrink" + rollingAll = "rolling_all" +) + +// List of available update strategies. +var strategiesList = []string{ + autodetect, growAndShrink, rollingGrowAndShrink, rollingAll, +} + +func ElasticsearchSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Required Elasticsearch resource definition", + Required: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "autoscale": { + Type: types.BoolType, + Description: `Enable or disable autoscaling. Defaults to the setting coming from the deployment template. 
Accepted values are "true" or "false".`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "ref_id": { + Type: types.StringType, + Description: "Optional ref_id to set on the Elasticsearch resource", + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + }, + }, + "resource_id": { + Type: types.StringType, + Description: "The Elasticsearch resource unique identifier", + Computed: true, + }, + "region": { + Type: types.StringType, + Description: "The Elasticsearch resource region", + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "cloud_id": { + Type: types.StringType, + Description: "The encoded Elasticsearch credentials to use in Beats or Logstash", + Computed: true, + }, + "http_endpoint": { + Type: types.StringType, + Description: "The Elasticsearch resource HTTP endpoint", + Computed: true, + }, + "https_endpoint": { + Type: types.StringType, + Description: "The Elasticsearch resource HTTPs endpoint", + Computed: true, + }, + + "topology": elasticsearchTopologySchema(), + + "trust_account": elasticsearchTrustAccountSchema(), + + "trust_external": elasticsearchTrustExternalSchema(), + + "config": elasticsearchConfigSchema(), + + "remote_cluster": ElasticsearchRemoteClusterSchema(), + + "snapshot_source": elasticsearchSnapshotSourceSchema(), + + "extension": elasticsearchExtensionSchema(), + + "strategy": { + Description: "Configuration strategy type " + strings.Join(strategiesList, ", "), + Type: types.StringType, + Optional: true, + Validators: []tfsdk.AttributeValidator{stringvalidator.OneOf(strategiesList...)}, + }, + }), + } +} + +func elasticsearchConfigSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: `Optional Elasticsearch settings which will be applied to all topologies`, + Optional: true, + 
Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Elasticsearch nodes will use. Note that this field will only work for internal users only.", + Optional: true, + }, + "plugins": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Description: "List of Elasticsearch supported plugins, which vary from version to version. Check the Stack Pack version to see which plugins are supported for each version. This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", + Optional: true, + Computed: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, + Optional: true, + }, + }), + } +} + +func elasticsearchTopologyAutoscalingSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", + Required: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "max_size_resource": { + Description: "Maximum resource type for the maximum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(2), + }, + }, + "max_size": { + Description: "Maximum size value for the maximum 
autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(2), + }, + }, + "min_size_resource": { + Description: "Minimum resource type for the minimum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(2), + }, + }, + "min_size": { + Description: "Minimum size value for the minimum autoscaling setting.", + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(2), + }, + }, + "policy_override_json": { + Type: types.StringType, + Description: "Computed policy overrides set directly via the API or other clients.", + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(2), + }, + }, + }), + } +} + +func ElasticsearchRemoteClusterSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch remote clusters to configure for the Elasticsearch resource, can be set multiple times", + Optional: true, + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "deployment_id": { + Description: "Remote deployment ID", + Type: types.StringType, + Validators: []tfsdk.AttributeValidator{stringvalidator.LengthBetween(32, 32)}, + Required: true, + }, + "alias": { + Description: "Alias for this Cross Cluster Search binding", + Type: types.StringType, + Validators: []tfsdk.AttributeValidator{stringvalidator.NoneOf("")}, + Required: true, + }, + "ref_id": { + Description: `Remote elasticsearch "ref_id", it is best left to the default value`, + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + }, + Optional: true, + }, + "skip_unavailable": { + Description: "If true, skip the cluster during search when 
disconnected", + Type: types.BoolType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), + }, + Computed: true, + Optional: true, + }, + }), + } +} + +func elasticsearchSnapshotSourceSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional snapshot source settings. Restore data from a snapshot of another deployment.", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "source_elasticsearch_cluster_id": { + Description: "ID of the Elasticsearch cluster that will be used as the source of the snapshot", + Type: types.StringType, + Required: true, + }, + "snapshot_name": { + Description: "Name of the snapshot to restore. Use '__latest_success__' to get the most recent successful snapshot.", + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "__latest_success__"}), + }, + Optional: true, + Computed: true, + }, + }), + } +} + +func elasticsearchExtensionSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch extensions such as custom bundles or plugins.", + Optional: true, + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "name": { + Description: "Extension name.", + Type: types.StringType, + Required: true, + }, + "type": { + Description: "Extension type, only `bundle` or `plugin` are supported.", + Type: types.StringType, + Required: true, + Validators: []tfsdk.AttributeValidator{stringvalidator.OneOf("bundle", "plugin")}, + }, + "version": { + Description: "Elasticsearch compatibility version. 
Bundles should specify major or minor versions with wildcards, such as `7.*` or `*` but **plugins must use full version notation down to the patch level**, such as `7.10.1` and wildcards are not allowed.", + Type: types.StringType, + Required: true, + }, + "url": { + Description: "Bundle or plugin URL, the extension URL can be obtained from the `ec_deployment_extension..url` attribute or the API and cannot be a random HTTP address that is hosted elsewhere.", + Type: types.StringType, + Required: true, + }, + }), + } +} + +func elasticsearchTrustAccountSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch account trust settings.", + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "account_id": { + Description: "The ID of the Account.", + Type: types.StringType, + Required: true, + }, + "trust_all": { + Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", + Type: types.BoolType, + Required: true, + }, + "trust_allowlist": { + Description: "The list of clusters to trust. Only used when `trust_all` is false.", + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + }, + }), + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + } +} + +func elasticsearchTrustExternalSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Elasticsearch external trust settings.", + Attributes: tfsdk.SetNestedAttributes(map[string]tfsdk.Attribute{ + "relationship_id": { + Description: "The ID of the external trust relationship.", + Type: types.StringType, + Required: true, + }, + "trust_all": { + Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", + Type: types.BoolType, + Required: true, + }, + "trust_allowlist": { + Description: "The list of clusters to trust. 
Only used when `trust_all` is false.", + Type: types.SetType{ + ElemType: types.StringType, + }, + Optional: true, + }, + }), + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + } +} + +func elasticsearchTopologySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Required: true, + // Optional: true, + // Computed: true, + Description: `Elasticsearch topology`, + Attributes: tfsdk.MapNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Description: `Computed Instance Configuration ID of the topology element`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(1), + }, + }, + "size": { + Type: types.StringType, + Description: `Amount of "size_resource" per node in the "g" notation`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(1), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + }, + }, + "zone_count": { + Type: types.Int64Type, + Description: `Number of zones that the Elasticsearch cluster will span. 
This is used to set HA`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseTopologyStateForUnknown(1), + }, + }, + "node_type_data": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (data node)`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, + }, + "node_type_master": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (master node)`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, + }, + "node_type_ingest": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (ingest node)`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, + }, + "node_type_ml": { + Type: types.StringType, + Description: `The node type for the Elasticsearch Topology element (machine learning node)`, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeTypesDefault(), + }, + }, + "node_roles": { + Type: types.SetType{ + ElemType: types.StringType, + }, + Description: `The computed list of node roles for the current topology element`, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + UseNodeRolesDefault(), + }, + }, + "autoscaling": elasticsearchTopologyAutoscalingSchema(), + }), + } +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go new file mode 100644 index 000000000..a2773ec64 --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier.go @@ -0,0 +1,102 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// Use current state for a topology's attribute if the topology's state is not nil and the template attribute has not changed +func UseTopologyStateForUnknown(topologyNestingLevel int) tfsdk.AttributePlanModifier { + return useTopologyState{topologyNestingLevel: topologyNestingLevel} +} + +type useTopologyState struct { + topologyNestingLevel int +} + +func (m useTopologyState) Modify(ctx context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { + if req.AttributeState == nil || resp.AttributePlan == nil || req.AttributeConfig == nil { + return + } + + if !resp.AttributePlan.IsUnknown() { + return + } + + // if the config is the unknown value, use the unknown value otherwise, interpolation gets messed up + if req.AttributeConfig.IsUnknown() { + return + } + + tierPath := req.AttributePath.ParentPath() + for i := m.topologyNestingLevel - 1; i > 0; i-- { + tierPath = tierPath.ParentPath() + } + + // we check state of entire topology state instead of 
topology attributes states because nil can be a valid state for some topology attributes + // e.g. `aws-io-optimized-v2` template doesn't specify `autoscaling_min` for `hot_content` so `min_size`'s state is nil + topologyStateDefined, diags := attributeStateDefined(ctx, tierPath, req) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + if !topologyStateDefined { + return + } + + templateChanged, diags := attributeChanged(ctx, path.Root("deployment_template_id"), req) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + + if templateChanged { + return + } + + resp.AttributePlan = req.AttributeState +} + +func (r useTopologyState) Description(ctx context.Context) string { + return "Use tier's state if it's defined and template is the same." +} + +func (r useTopologyState) MarkdownDescription(ctx context.Context) string { + return "Use tier's state if it's defined and template is the same." +} + +func attributeStateDefined(ctx context.Context, p path.Path, req tfsdk.ModifyAttributePlanRequest) (bool, diag.Diagnostics) { + var val attr.Value + + if diags := req.State.GetAttribute(ctx, p, &val); diags.HasError() { + return false, diags + } + + return !val.IsNull() && !val.IsUnknown(), nil +} diff --git a/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier_test.go b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier_test.go new file mode 100644 index 000000000..3cfe129cb --- /dev/null +++ b/ec/ecresource/deploymentresource/elasticsearch/v2/topology_plan_modifier_test.go @@ -0,0 +1,215 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2_test + +/* +import ( + "context" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/hashicorp/terraform-plugin-go/tftypes" + "github.com/stretchr/testify/assert" +) + +func Test_topologyPlanModifier(t *testing.T) { + type args struct { + // the actual attribute type doesn't matter + attributeState types.String + attributePlan types.String + deploymentState deploymentv2.Deployment + deploymentPlan deploymentv2.Deployment + } + tests := []struct { + name string + args args + expected types.String + }{ + { + name: "it should keep the current plan value if the plan is known", + args: args{ + attributeState: types.String{Value: "state value"}, + attributePlan: types.String{Value: "plan value"}, + }, + expected: types.String{Value: "plan value"}, + }, + + { + name: "it should not use state if there is no such topology in the state", + args: args{ + attributeState: types.String{Null: true}, + attributePlan: types.String{Unknown: true}, + deploymentState: deploymentv2.Deployment{ + Elasticsearch: &v2.Elasticsearch{}, + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should not use state if the plan changed the template 
attribute", + args: args{ + attributeState: types.String{Value: "1g"}, + attributePlan: types.String{Unknown: true}, + deploymentState: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Elasticsearch: &v2.Elasticsearch{ + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{ + MinSize: ec.String("1g"), + }, + }), + }, + }, + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-storage-optimized-v3", + Elasticsearch: &v2.Elasticsearch{ + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + }, + expected: types.String{Unknown: true}, + }, + + { + name: "it should use the current state if the state is null, the topology is defined in the state and the template has not changed", + args: args{ + attributeState: types.String{Null: true}, + attributePlan: types.String{Unknown: true}, + deploymentState: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Elasticsearch: &v2.Elasticsearch{ + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Elasticsearch: &v2.Elasticsearch{ + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + }, + expected: types.String{Null: true}, + }, + + { + name: "it should use the current state if the topology is defined in the state and the template has not changed", + args: args{ + attributeState: types.String{Value: "1g"}, + attributePlan: types.String{Unknown: true}, + deploymentState: deploymentv2.Deployment{ + 
DeploymentTemplateId: "aws-io-optimized-v2", + Elasticsearch: &v2.Elasticsearch{ + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{ + MaxSize: ec.String("1g"), + }, + }), + }, + }, + }, + deploymentPlan: deploymentv2.Deployment{ + DeploymentTemplateId: "aws-io-optimized-v2", + Elasticsearch: &v2.Elasticsearch{ + Topology: v2.ElasticsearchTopologies{ + *v2.CreateTierForTest("hot_content", v2.ElasticsearchTopology{ + Autoscaling: &v2.ElasticsearchTopologyAutoscaling{}, + }), + }, + }, + }, + }, + expected: types.String{Value: "1g"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + modifier := v2.UseTopologyStateForUnknown("hot") + + deploymentStateValue := tftypesValueFromGoTypeValue(t, tt.args.deploymentState, deploymentv2.DeploymentSchema().Type()) + + deploymentPlanValue := tftypesValueFromGoTypeValue(t, tt.args.deploymentPlan, deploymentv2.DeploymentSchema().Type()) + + req := tfsdk.ModifyAttributePlanRequest{ + // attributeConfig value is not used in the plan modifier + // it just should be known + AttributeConfig: types.String{}, + AttributeState: tt.args.attributeState, + State: tfsdk.State{ + Raw: deploymentStateValue, + Schema: deploymentv2.DeploymentSchema(), + }, + Plan: tfsdk.Plan{ + Raw: deploymentPlanValue, + Schema: deploymentv2.DeploymentSchema(), + }, + } + + resp := tfsdk.ModifyAttributePlanResponse{AttributePlan: tt.args.attributePlan} + + modifier.Modify(context.Background(), req, &resp) + + assert.Nil(t, resp.Diagnostics) + + assert.Equal(t, tt.expected, resp.AttributePlan) + }) + } +} + +func attrValueFromGoTypeValue(t *testing.T, goValue any, attributeType attr.Type) attr.Value { + var attrValue attr.Value + diags := tfsdk.ValueFrom(context.Background(), goValue, attributeType, &attrValue) + assert.Nil(t, diags) + return attrValue +} + +func tftypesValueFromGoTypeValue(t *testing.T, goValue any, attributeType
attr.Type) tftypes.Value { + attrValue := attrValueFromGoTypeValue(t, goValue, attributeType) + tftypesValue, err := attrValue.ToTerraformValue(context.Background()) + assert.Nil(t, err) + return tftypesValue +} + +func unknownValueFromAttrType(t *testing.T, attributeType attr.Type) attr.Value { + tfVal := tftypes.NewValue(attributeType.TerraformType(context.Background()), tftypes.UnknownValue) + val, err := attributeType.ValueFromTerraform(context.Background(), tfVal) + assert.Nil(t, err) + return val +} +*/ diff --git a/ec/ecresource/deploymentresource/elasticsearch_expanders.go b/ec/ecresource/deploymentresource/elasticsearch_expanders.go deleted file mode 100644 index 24da2e967..000000000 --- a/ec/ecresource/deploymentresource/elasticsearch_expanders.go +++ /dev/null @@ -1,650 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "encoding/json" - "fmt" - "reflect" - "strconv" - "strings" - - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deploymentsize" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// These constants are only used to determine whether or not a dedicated -// tier of masters or ingest (coordinating) nodes are set. -const ( - dataTierRolePrefix = "data_" - ingestDataTierRole = "ingest" - masterDataTierRole = "master" - autodetect = "autodetect" - growAndShrink = "grow_and_shrink" - rollingGrowAndShrink = "rolling_grow_and_shrink" - rollingAll = "rolling_all" -) - -// List of update strategies availables. -var strategiesList = []string{ - autodetect, growAndShrink, rollingGrowAndShrink, rollingAll, -} - -// expandEsResources expands Elasticsearch resources -func expandEsResources(ess []interface{}, tpl *models.ElasticsearchPayload) ([]*models.ElasticsearchPayload, error) { - if len(ess) == 0 { - return nil, nil - } - - result := make([]*models.ElasticsearchPayload, 0, len(ess)) - for _, raw := range ess { - resResource, err := expandEsResource(raw, tpl) - if err != nil { - return nil, err - } - result = append(result, resResource) - } - - return result, nil -} - -// expandEsResource expands a single Elasticsearch resource -func expandEsResource(raw interface{}, res *models.ElasticsearchPayload) (*models.ElasticsearchPayload, error) { - es := raw.(map[string]interface{}) - - if refID, ok := es["ref_id"].(string); ok { - res.RefID = ec.String(refID) - } - - if region, ok := es["region"].(string); ok && region != "" { - res.Region = ec.String(region) - } - - // Unsetting the curation properties is since they're deprecated since - // >= 6.6.0 which is when ILM is introduced in Elasticsearch. 
- unsetElasticsearchCuration(res) - - if rt, ok := es["topology"].([]interface{}); ok && len(rt) > 0 { - topology, err := expandEsTopology(rt, res.Plan.ClusterTopology) - if err != nil { - return nil, err - } - res.Plan.ClusterTopology = topology - } - - // Fixes the node_roles field to remove the dedicated tier roles from the - // list when these are set as a dedicated tier as a topology element. - updateNodeRolesOnDedicatedTiers(res.Plan.ClusterTopology) - - if cfg, ok := es["config"].([]interface{}); ok { - if err := expandEsConfig(cfg, res.Plan.Elasticsearch); err != nil { - return nil, err - } - } - - if snap, ok := es["snapshot_source"].([]interface{}); ok && len(snap) > 0 { - res.Plan.Transient = &models.TransientElasticsearchPlanConfiguration{ - RestoreSnapshot: &models.RestoreSnapshotConfiguration{}, - } - expandSnapshotSource(snap, res.Plan.Transient.RestoreSnapshot) - } - - if ext, ok := es["extension"].(*schema.Set); ok && ext.Len() > 0 { - expandEsExtension(ext.List(), res.Plan.Elasticsearch) - } - - if autoscale, ok := es["autoscale"].(string); ok && autoscale != "" { - autoscaleBool, err := strconv.ParseBool(autoscale) - if err != nil { - return nil, fmt.Errorf("failed parsing autoscale value: %w", err) - } - res.Plan.AutoscalingEnabled = &autoscaleBool - } - - if trust, ok := es["trust_account"].(*schema.Set); ok && trust.Len() > 0 { - if res.Settings == nil { - res.Settings = &models.ElasticsearchClusterSettings{} - } - expandAccountTrust(trust.List(), res.Settings) - } - - if trust, ok := es["trust_external"].(*schema.Set); ok && trust.Len() > 0 { - if res.Settings == nil { - res.Settings = &models.ElasticsearchClusterSettings{} - } - expandExternalTrust(trust.List(), res.Settings) - } - - if strategy, ok := es["strategy"].([]interface{}); ok && len(strategy) > 0 { - if res.Plan.Transient == nil { - res.Plan.Transient = &models.TransientElasticsearchPlanConfiguration{ - Strategy: &models.PlanStrategy{}, - } - } - expandStrategy(strategy, 
res.Plan.Transient.Strategy) - } - - return res, nil -} - -// expandStrategy expands the Configuration Strategy. -func expandStrategy(raw []interface{}, strategy *models.PlanStrategy) { - for _, rawStrategy := range raw { - strategyCfg, ok := rawStrategy.(map[string]interface{}) - if !ok { - continue - } - - rawValue, ok := strategyCfg["type"].(string) - if !ok { - continue - } - - if rawValue == autodetect { - strategy.Autodetect = new(models.AutodetectStrategyConfig) - } else if rawValue == growAndShrink { - strategy.GrowAndShrink = new(models.GrowShrinkStrategyConfig) - } else if rawValue == rollingGrowAndShrink { - strategy.RollingGrowAndShrink = new(models.RollingGrowShrinkStrategyConfig) - } else if rawValue == rollingAll { - strategy.Rolling = &models.RollingStrategyConfig{ - GroupBy: "__all__", - } - } - } -} - -// expandEsTopology expands a flattened topology -func expandEsTopology(rawTopologies []interface{}, topologies []*models.ElasticsearchClusterTopologyElement) ([]*models.ElasticsearchClusterTopologyElement, error) { - res := topologies - - for _, rawTop := range rawTopologies { - topology, ok := rawTop.(map[string]interface{}) - if !ok { - continue - } - - var topologyID string - if id, ok := topology["id"].(string); ok { - topologyID = id - } - - size, err := util.ParseTopologySize(topology) - if err != nil { - return nil, err - } - - elem, err := matchEsTopologyID(topologyID, topologies) - if err != nil { - return nil, fmt.Errorf("elasticsearch topology %s: %w", topologyID, err) - } - if size != nil { - elem.Size = size - } - - if zones, ok := topology["zone_count"].(int); ok && zones > 0 { - elem.ZoneCount = int32(zones) - } - - if err := parseLegacyNodeType(topology, elem.NodeType); err != nil { - return nil, err - } - - if nrSet, ok := topology["node_roles"].(*schema.Set); ok && nrSet.Len() > 0 { - elem.NodeRoles = util.ItemsToString(nrSet.List()) - elem.NodeType = nil - } - - if autoscalingRaw, ok := topology["autoscaling"].([]interface{}); ok 
&& len(autoscalingRaw) > 0 { - for _, autoscaleRaw := range autoscalingRaw { - autoscale, ok := autoscaleRaw.(map[string]interface{}) - if !ok { - continue - } - - if elem.AutoscalingMax == nil { - elem.AutoscalingMax = new(models.TopologySize) - } - - if elem.AutoscalingMin == nil { - elem.AutoscalingMin = new(models.TopologySize) - } - - err := expandAutoscalingDimension(autoscale, elem.AutoscalingMax, "max") - if err != nil { - return nil, err - } - - err = expandAutoscalingDimension(autoscale, elem.AutoscalingMin, "min") - if err != nil { - return nil, err - } - - // Ensure that if the Min and Max are empty, they're nil. - if reflect.DeepEqual(elem.AutoscalingMin, new(models.TopologySize)) { - elem.AutoscalingMin = nil - } - if reflect.DeepEqual(elem.AutoscalingMax, new(models.TopologySize)) { - elem.AutoscalingMax = nil - } - - if policy, ok := autoscale["policy_override_json"].(string); ok && policy != "" { - if err := json.Unmarshal([]byte(policy), - &elem.AutoscalingPolicyOverrideJSON, - ); err != nil { - return nil, fmt.Errorf( - "elasticsearch topology %s: unable to load policy_override_json: %w", - topologyID, err, - ) - } - } - } - } - - if cfg, ok := topology["config"].([]interface{}); ok { - if elem.Elasticsearch == nil { - elem.Elasticsearch = &models.ElasticsearchConfiguration{} - } - if err := expandEsConfig(cfg, elem.Elasticsearch); err != nil { - return nil, err - } - } - } - - return res, nil -} - -// expandAutoscalingDimension centralises processing of %_size and %_size_resource attributes -// Due to limitations in the Terraform SDK, it's not possible to specify a Default on a Computed schema member -// to work around this limitation, this function will default the %_size_resource attribute to `memory`. -// Without this default, setting autoscaling limits on tiers which do not have those limits in the deployment -// template leads to an API error due to the empty resource field on the TopologySize model. 
-func expandAutoscalingDimension(autoscale map[string]interface{}, model *models.TopologySize, dimension string) error { - sizeAttribute := fmt.Sprintf("%s_size", dimension) - resourceAttribute := fmt.Sprintf("%s_size_resource", dimension) - - if size, ok := autoscale[sizeAttribute].(string); ok && size != "" { - val, err := deploymentsize.ParseGb(size) - if err != nil { - return err - } - model.Value = &val - - if model.Resource == nil { - model.Resource = ec.String("memory") - } - } - - if sizeResource, ok := autoscale[resourceAttribute].(string); ok && sizeResource != "" { - model.Resource = ec.String(sizeResource) - } - - return nil -} - -func expandEsConfig(raw []interface{}, esCfg *models.ElasticsearchConfiguration) error { - for _, rawCfg := range raw { - cfg, ok := rawCfg.(map[string]interface{}) - if !ok { - continue - } - if settings, ok := cfg["user_settings_json"].(string); ok && settings != "" { - if err := json.Unmarshal([]byte(settings), &esCfg.UserSettingsJSON); err != nil { - return fmt.Errorf( - "failed expanding elasticsearch user_settings_json: %w", err, - ) - } - } - if settings, ok := cfg["user_settings_override_json"].(string); ok && settings != "" { - if err := json.Unmarshal([]byte(settings), &esCfg.UserSettingsOverrideJSON); err != nil { - return fmt.Errorf( - "failed expanding elasticsearch user_settings_override_json: %w", err, - ) - } - } - if settings, ok := cfg["user_settings_yaml"].(string); ok && settings != "" { - esCfg.UserSettingsYaml = settings - } - if settings, ok := cfg["user_settings_override_yaml"].(string); ok && settings != "" { - esCfg.UserSettingsOverrideYaml = settings - } - - if v, ok := cfg["plugins"].(*schema.Set); ok && v.Len() > 0 { - esCfg.EnabledBuiltInPlugins = util.ItemsToString(v.List()) - } - - if v, ok := cfg["docker_image"].(string); ok { - esCfg.DockerImage = v - } - } - - return nil -} - -func expandSnapshotSource(raw []interface{}, restore *models.RestoreSnapshotConfiguration) { - for _, rawRestore := 
range raw { - var rs, ok = rawRestore.(map[string]interface{}) - if !ok { - continue - } - - if clusterID, ok := rs["source_elasticsearch_cluster_id"].(string); ok { - restore.SourceClusterID = clusterID - } - - if snapshotName, ok := rs["snapshot_name"].(string); ok { - restore.SnapshotName = ec.String(snapshotName) - } - } -} - -func matchEsTopologyID(id string, topologies []*models.ElasticsearchClusterTopologyElement) (*models.ElasticsearchClusterTopologyElement, error) { - for _, t := range topologies { - if t.ID == id { - return t, nil - } - } - - topIDs := topologyIDs(topologies) - for i, id := range topIDs { - topIDs[i] = "\"" + id + "\"" - } - - return nil, fmt.Errorf(`invalid id: valid topology IDs are %s`, - strings.Join(topIDs, ", "), - ) -} - -func emptyEsResource() *models.ElasticsearchPayload { - return &models.ElasticsearchPayload{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{}, - }, - Settings: &models.ElasticsearchClusterSettings{}, - } -} - -// esResource returns the ElaticsearchPayload from a deployment -// template or an empty version of the payload. -func esResource(res *models.DeploymentTemplateInfoV2) *models.ElasticsearchPayload { - if len(res.DeploymentTemplate.Resources.Elasticsearch) == 0 { - return emptyEsResource() - } - return res.DeploymentTemplate.Resources.Elasticsearch[0] -} - -// esResourceFromUpdate returns the ElaticsearchPayload from a deployment -// update request or an empty version of the payload. 
-func esResourceFromUpdate(res *models.DeploymentUpdateResources) *models.ElasticsearchPayload { - if len(res.Elasticsearch) == 0 { - return emptyEsResource() - } - - return res.Elasticsearch[0] -} - -func unsetElasticsearchCuration(payload *models.ElasticsearchPayload) { - if payload.Plan.Elasticsearch != nil { - payload.Plan.Elasticsearch.Curation = nil - } - - if payload.Settings != nil { - payload.Settings.Curation = nil - } -} - -func topologyIDs(topologies []*models.ElasticsearchClusterTopologyElement) []string { - var result []string - - for _, topology := range topologies { - result = append(result, topology.ID) - } - - if len(result) == 0 { - return nil - } - return result -} - -func parseLegacyNodeType(topology map[string]interface{}, nodeType *models.ElasticsearchNodeType) error { - if nodeType == nil { - return nil - } - - if ntData, ok := topology["node_type_data"].(string); ok && ntData != "" { - nt, err := strconv.ParseBool(ntData) - if err != nil { - return fmt.Errorf("failed parsing node_type_data value: %w", err) - } - nodeType.Data = ec.Bool(nt) - } - - if ntMaster, ok := topology["node_type_master"].(string); ok && ntMaster != "" { - nt, err := strconv.ParseBool(ntMaster) - if err != nil { - return fmt.Errorf("failed parsing node_type_master value: %w", err) - } - nodeType.Master = ec.Bool(nt) - } - - if ntIngest, ok := topology["node_type_ingest"].(string); ok && ntIngest != "" { - nt, err := strconv.ParseBool(ntIngest) - if err != nil { - return fmt.Errorf("failed parsing node_type_ingest value: %w", err) - } - nodeType.Ingest = ec.Bool(nt) - } - - if ntMl, ok := topology["node_type_ml"].(string); ok && ntMl != "" { - nt, err := strconv.ParseBool(ntMl) - if err != nil { - return fmt.Errorf("failed parsing node_type_ml value: %w", err) - } - nodeType.Ml = ec.Bool(nt) - } - - return nil -} - -func updateNodeRolesOnDedicatedTiers(topologies []*models.ElasticsearchClusterTopologyElement) { - dataTier, hasMasterTier, hasIngestTier := 
dedicatedTopoogies(topologies) - // This case is not very likely since all deployments will have a data tier. - // It's here because the code path is technically possible and it's better - // than a straight panic. - if dataTier == nil { - return - } - - if hasIngestTier { - dataTier.NodeRoles = removeItemFromSlice( - dataTier.NodeRoles, ingestDataTierRole, - ) - } - if hasMasterTier { - dataTier.NodeRoles = removeItemFromSlice( - dataTier.NodeRoles, masterDataTierRole, - ) - } -} - -func dedicatedTopoogies(topologies []*models.ElasticsearchClusterTopologyElement) (dataTier *models.ElasticsearchClusterTopologyElement, hasMasterTier, hasIngestTier bool) { - for _, topology := range topologies { - var hasSomeDataRole bool - var hasMasterRole bool - var hasIngestRole bool - for _, role := range topology.NodeRoles { - sizeNonZero := *topology.Size.Value > 0 - if strings.HasPrefix(role, dataTierRolePrefix) && sizeNonZero { - hasSomeDataRole = true - } - if role == ingestDataTierRole && sizeNonZero { - hasIngestRole = true - } - if role == masterDataTierRole && sizeNonZero { - hasMasterRole = true - } - } - - if !hasSomeDataRole && hasMasterRole { - hasMasterTier = true - } - - if !hasSomeDataRole && hasIngestRole { - hasIngestTier = true - } - - if hasSomeDataRole && hasMasterRole { - dataTier = topology - } - } - - return dataTier, hasMasterTier, hasIngestTier -} - -func removeItemFromSlice(slice []string, item string) []string { - var hasItem bool - var itemIndex int - for i, str := range slice { - if str == item { - hasItem = true - itemIndex = i - } - } - if hasItem { - copy(slice[itemIndex:], slice[itemIndex+1:]) - return slice[:len(slice)-1] - } - return slice -} - -func expandEsExtension(raw []interface{}, es *models.ElasticsearchConfiguration) { - for _, rawExt := range raw { - m := rawExt.(map[string]interface{}) - - var version string - if v, ok := m["version"].(string); ok { - version = v - } - - var url string - if u, ok := m["url"].(string); ok { - url = u 
- } - - var name string - if n, ok := m["name"].(string); ok { - name = n - } - - if t, ok := m["type"].(string); ok && t == "bundle" { - es.UserBundles = append(es.UserBundles, &models.ElasticsearchUserBundle{ - Name: &name, - ElasticsearchVersion: &version, - URL: &url, - }) - } - - if t, ok := m["type"].(string); ok && t == "plugin" { - es.UserPlugins = append(es.UserPlugins, &models.ElasticsearchUserPlugin{ - Name: &name, - ElasticsearchVersion: &version, - URL: &url, - }) - } - } -} - -func expandAccountTrust(raw []interface{}, es *models.ElasticsearchClusterSettings) { - var accounts []*models.AccountTrustRelationship - for _, rawTrust := range raw { - m := rawTrust.(map[string]interface{}) - - var id string - if v, ok := m["account_id"].(string); ok { - id = v - } - - var all bool - if a, ok := m["trust_all"].(bool); ok { - all = a - } - - var allowlist []string - if al, ok := m["trust_allowlist"].(*schema.Set); ok && al.Len() > 0 { - allowlist = util.ItemsToString(al.List()) - } - - accounts = append(accounts, &models.AccountTrustRelationship{ - AccountID: &id, - TrustAll: &all, - TrustAllowlist: allowlist, - }) - } - - if len(accounts) == 0 { - return - } - - if es.Trust == nil { - es.Trust = &models.ElasticsearchClusterTrustSettings{} - } - - es.Trust.Accounts = append(es.Trust.Accounts, accounts...) 
-} - -func expandExternalTrust(raw []interface{}, es *models.ElasticsearchClusterSettings) { - var external []*models.ExternalTrustRelationship - for _, rawTrust := range raw { - m := rawTrust.(map[string]interface{}) - - var id string - if v, ok := m["relationship_id"].(string); ok { - id = v - } - - var all bool - if a, ok := m["trust_all"].(bool); ok { - all = a - } - - var allowlist []string - if al, ok := m["trust_allowlist"].(*schema.Set); ok && al.Len() > 0 { - allowlist = util.ItemsToString(al.List()) - } - - external = append(external, &models.ExternalTrustRelationship{ - TrustRelationshipID: &id, - TrustAll: &all, - TrustAllowlist: allowlist, - }) - } - - if len(external) == 0 { - return - } - - if es.Trust == nil { - es.Trust = &models.ElasticsearchClusterTrustSettings{} - } - - es.Trust.External = append(es.Trust.External, external...) -} diff --git a/ec/ecresource/deploymentresource/elasticsearch_flatteners.go b/ec/ecresource/deploymentresource/elasticsearch_flatteners.go deleted file mode 100644 index 512edc7d5..000000000 --- a/ec/ecresource/deploymentresource/elasticsearch_flatteners.go +++ /dev/null @@ -1,359 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "bytes" - "encoding/json" - "fmt" - "sort" - "strconv" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenEsResources takes in Elasticsearch resource models and returns its -// flattened form. -func flattenEsResources(in []*models.ElasticsearchResourceInfo, name string, remotes models.RemoteResources) ([]interface{}, error) { - result := make([]interface{}, 0, len(in)) - for _, res := range in { - m := make(map[string]interface{}) - if util.IsCurrentEsPlanEmpty(res) || isEsResourceStopped(res) { - continue - } - - if res.Info.ClusterID != nil && *res.Info.ClusterID != "" { - m["resource_id"] = *res.Info.ClusterID - } - - if res.RefID != nil && *res.RefID != "" { - m["ref_id"] = *res.RefID - } - - if res.Region != nil { - m["region"] = *res.Region - } - - plan := res.Info.PlanInfo.Current.Plan - topology, err := flattenEsTopology(plan) - if err != nil { - return nil, err - } - if len(topology) > 0 { - m["topology"] = topology - } - - if plan.AutoscalingEnabled != nil { - m["autoscale"] = strconv.FormatBool(*plan.AutoscalingEnabled) - } - - if meta := res.Info.Metadata; meta != nil && meta.CloudID != "" { - m["cloud_id"] = meta.CloudID - } - - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } - - m["config"] = flattenEsConfig(plan.Elasticsearch) - - if remotes := flattenEsRemotes(remotes); remotes.Len() > 0 { - m["remote_cluster"] = remotes - } - - extensions := schema.NewSet(esExtensionHash, nil) - for _, ext := range flattenEsBundles(plan.Elasticsearch.UserBundles) { - extensions.Add(ext) - } - - for _, ext := range flattenEsPlugins(plan.Elasticsearch.UserPlugins) { - extensions.Add(ext) - } - - if extensions.Len() > 0 { - m["extension"] = extensions - } - - if settings := res.Info.Settings; settings != nil { - if trust := 
flattenAccountTrust(settings.Trust); trust != nil { - m["trust_account"] = trust - } - - if trust := flattenExternalTrust(settings.Trust); trust != nil { - m["trust_external"] = trust - } - } - - result = append(result, m) - } - - return result, nil -} - -func isPotentiallySizedTopology(topology *models.ElasticsearchClusterTopologyElement, isAutoscaling bool) bool { - currentlySized := topology.Size != nil && topology.Size.Value != nil && *topology.Size.Value > 0 - canBeSized := isAutoscaling && topology.AutoscalingMax != nil && topology.AutoscalingMax.Value != nil && *topology.AutoscalingMax.Value > 0 - - return currentlySized || canBeSized -} - -func flattenEsTopology(plan *models.ElasticsearchClusterPlan) ([]interface{}, error) { - result := make([]interface{}, 0, len(plan.ClusterTopology)) - for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) - if !isPotentiallySizedTopology(topology, plan.AutoscalingEnabled != nil && *plan.AutoscalingEnabled) { - continue - } - - // ID is always set. - m["id"] = topology.ID - - if topology.InstanceConfigurationID != "" { - m["instance_configuration_id"] = topology.InstanceConfigurationID - } - - // TODO: Check legacy plans. 
- // if topology.MemoryPerNode > 0 { - // m["size"] = strconv.Itoa(int(topology.MemoryPerNode)) - // } - - if topology.Size != nil { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource - } - - m["zone_count"] = topology.ZoneCount - - if nt := topology.NodeType; nt != nil { - if nt.Data != nil { - m["node_type_data"] = strconv.FormatBool(*nt.Data) - } - - if nt.Ingest != nil { - m["node_type_ingest"] = strconv.FormatBool(*nt.Ingest) - } - - if nt.Master != nil { - m["node_type_master"] = strconv.FormatBool(*nt.Master) - } - - if nt.Ml != nil { - m["node_type_ml"] = strconv.FormatBool(*nt.Ml) - } - } - - if len(topology.NodeRoles) > 0 { - m["node_roles"] = schema.NewSet(schema.HashString, util.StringToItems( - topology.NodeRoles..., - )) - } - - autoscaling := make(map[string]interface{}) - if ascale := topology.AutoscalingMax; ascale != nil { - autoscaling["max_size_resource"] = *ascale.Resource - autoscaling["max_size"] = util.MemoryToState(*ascale.Value) - } - - if ascale := topology.AutoscalingMin; ascale != nil { - autoscaling["min_size_resource"] = *ascale.Resource - autoscaling["min_size"] = util.MemoryToState(*ascale.Value) - } - - if topology.AutoscalingPolicyOverrideJSON != nil { - b, err := json.Marshal(topology.AutoscalingPolicyOverrideJSON) - if err != nil { - return nil, fmt.Errorf( - "elasticsearch topology %s: unable to persist policy_override_json: %w", - topology.ID, err, - ) - } - autoscaling["policy_override_json"] = string(b) - } - - if len(autoscaling) > 0 { - m["autoscaling"] = []interface{}{autoscaling} - } - - // Computed config object to avoid unsetting legacy topology config settings. - m["config"] = flattenEsConfig(topology.Elasticsearch) - - result = append(result, m) - } - - // Ensure the topologies are sorted alphabetically by ID. 
- sort.SliceStable(result, func(i, j int) bool { - a := result[i].(map[string]interface{}) - b := result[j].(map[string]interface{}) - return a["id"].(string) < b["id"].(string) - }) - return result, nil -} - -func flattenEsConfig(cfg *models.ElasticsearchConfiguration) []interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if len(cfg.EnabledBuiltInPlugins) > 0 { - m["plugins"] = schema.NewSet(schema.HashString, - util.StringToItems(cfg.EnabledBuiltInPlugins...), - ) - } - - if cfg.UserSettingsYaml != "" { - m["user_settings_yaml"] = cfg.UserSettingsYaml - } - - if cfg.UserSettingsOverrideYaml != "" { - m["user_settings_override_yaml"] = cfg.UserSettingsOverrideYaml - } - - if o := cfg.UserSettingsJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_json"] = string(b) - } - } - - if o := cfg.UserSettingsOverrideJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_override_json"] = string(b) - } - } - - if cfg.DockerImage != "" { - m["docker_image"] = cfg.DockerImage - } - - // If no settings are set, there's no need to store the empty values in the - // state and makes the state consistent with a clean import return. 
- if len(m) == 0 { - return nil - } - - return []interface{}{m} -} - -func flattenEsRemotes(in models.RemoteResources) *schema.Set { - res := newElasticsearchRemoteSet() - for _, r := range in.Resources { - var m = make(map[string]interface{}) - if r.DeploymentID != nil && *r.DeploymentID != "" { - m["deployment_id"] = *r.DeploymentID - } - - if r.ElasticsearchRefID != nil && *r.ElasticsearchRefID != "" { - m["ref_id"] = *r.ElasticsearchRefID - } - - if r.Alias != nil && *r.Alias != "" { - m["alias"] = *r.Alias - } - - if r.SkipUnavailable != nil { - m["skip_unavailable"] = *r.SkipUnavailable - } - res.Add(m) - } - - return res -} - -func newElasticsearchRemoteSet(remotes ...interface{}) *schema.Set { - return schema.NewSet( - schema.HashResource(elasticsearchRemoteCluster().Elem.(*schema.Resource)), - remotes, - ) -} - -func flattenEsBundles(in []*models.ElasticsearchUserBundle) []interface{} { - result := make([]interface{}, 0, len(in)) - for _, bundle := range in { - m := make(map[string]interface{}) - m["type"] = "bundle" - m["version"] = *bundle.ElasticsearchVersion - m["url"] = *bundle.URL - m["name"] = *bundle.Name - - result = append(result, m) - } - - return result -} - -func flattenEsPlugins(in []*models.ElasticsearchUserPlugin) []interface{} { - result := make([]interface{}, 0, len(in)) - for _, plugin := range in { - m := make(map[string]interface{}) - m["type"] = "plugin" - m["version"] = *plugin.ElasticsearchVersion - m["url"] = *plugin.URL - m["name"] = *plugin.Name - - result = append(result, m) - } - - return result -} - -func flattenAccountTrust(in *models.ElasticsearchClusterTrustSettings) *schema.Set { - if in == nil { - return nil - } - - account := schema.NewSet(schema.HashResource(accountResource()), nil) - for _, acc := range in.Accounts { - account.Add(map[string]interface{}{ - "account_id": *acc.AccountID, - "trust_all": *acc.TrustAll, - "trust_allowlist": schema.NewSet(schema.HashString, - util.StringToItems(acc.TrustAllowlist...), - ), - 
}) - } - - if account.Len() > 0 { - return account - } - return nil -} - -func flattenExternalTrust(in *models.ElasticsearchClusterTrustSettings) *schema.Set { - if in == nil { - return nil - } - - external := schema.NewSet(schema.HashResource(externalResource()), nil) - for _, ext := range in.External { - external.Add(map[string]interface{}{ - "relationship_id": *ext.TrustRelationshipID, - "trust_all": *ext.TrustAll, - "trust_allowlist": schema.NewSet(schema.HashString, - util.StringToItems(ext.TrustAllowlist...), - ), - }) - } - - if external.Len() > 0 { - return external - } - return nil -} diff --git a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders.go b/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders.go deleted file mode 100644 index be2f2535a..000000000 --- a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders.go +++ /dev/null @@ -1,79 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/esremoteclustersapi" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func handleRemoteClusters(d *schema.ResourceData, client *api.API) error { - if keyIsEmptyUnchanged(d, "elasticsearch.0.remote_cluster") { - return nil - } - - remoteResources := expandRemoteClusters( - d.Get("elasticsearch.0.remote_cluster").(*schema.Set), - ) - - return esremoteclustersapi.Update(esremoteclustersapi.UpdateParams{ - API: client, - DeploymentID: d.Id(), - RefID: d.Get("elasticsearch.0.ref_id").(string), - RemoteResources: remoteResources, - }) -} - -func expandRemoteClusters(set *schema.Set) *models.RemoteResources { - res := models.RemoteResources{Resources: []*models.RemoteResourceRef{}} - - for _, r := range set.List() { - var resourceRef models.RemoteResourceRef - m := r.(map[string]interface{}) - - if id, ok := m["deployment_id"]; ok { - resourceRef.DeploymentID = ec.String(id.(string)) - } - - if v, ok := m["ref_id"]; ok { - resourceRef.ElasticsearchRefID = ec.String(v.(string)) - } - - if v, ok := m["alias"]; ok { - resourceRef.Alias = ec.String(v.(string)) - } - - if v, ok := m["skip_unavailable"]; ok { - resourceRef.SkipUnavailable = ec.Bool(v.(bool)) - } - - res.Resources = append(res.Resources, &resourceRef) - } - - return &res -} - -func keyIsEmptyUnchanged(d *schema.ResourceData, k string) bool { - old, new := d.GetChange(k) - oldSlice := old.(*schema.Set) - newSlice := new.(*schema.Set) - return oldSlice.Len() == 0 && newSlice.Len() == 0 -} diff --git a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders_test.go b/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders_test.go deleted file mode 100644 index a79adaaee..000000000 --- 
a/ec/ecresource/deploymentresource/elasticsearch_remote_cluster_expanders_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_handleRemoteClusters(t *testing.T) { - deploymentEmptyRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeploymentEmptyRD(), - Schema: newSchema(), - }) - deploymentWithRemotesRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "remote_cluster": []interface{}{ - map[string]interface{}{ - "alias": "alias", - "deployment_id": "someid", - "ref_id": 
"main-elasticsearch", - "skip_unavailable": true, - }, - map[string]interface{}{ - "deployment_id": "some other id", - "ref_id": "main-elasticsearch", - }, - }, - }}, - }, - Schema: newSchema(), - }) - type args struct { - d *schema.ResourceData - client *api.API - } - tests := []struct { - name string - args args - err error - }{ - { - name: "returns when the resource has no remote clusters", - args: args{ - d: deploymentEmptyRD, - client: api.NewMock(), - }, - }, - { - name: "flattens the remote clusters", - args: args{ - d: deploymentWithRemotesRD, - client: api.NewMock(mock.New202ResponseAssertion( - &mock.RequestAssertion{ - Header: api.DefaultWriteMockHeaders, - Host: api.DefaultMockHost, - Path: `/api/v1/deployments/320b7b540dfc967a7a649c18e2fce4ed/elasticsearch/main-elasticsearch/remote-clusters`, - Method: "PUT", - Body: mock.NewStringBody(`{"resources":[{"alias":"alias","deployment_id":"someid","elasticsearch_ref_id":"main-elasticsearch","skip_unavailable":true},{"alias":"","deployment_id":"some other id","elasticsearch_ref_id":"main-elasticsearch","skip_unavailable":false}]}` + "\n"), - }, - mock.NewStringBody("{}"), - )), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := handleRemoteClusters(tt.args.d, tt.args.client) - if !assert.Equal(t, tt.err, err) { - t.Error(err) - } - }) - } -} - -func Test_expandRemoteClusters(t *testing.T) { - type args struct { - set *schema.Set - } - tests := []struct { - name string - args args - want *models.RemoteResources - }{ - { - name: "wants no error or empty res", - args: args{set: newElasticsearchRemoteSet()}, - want: &models.RemoteResources{Resources: []*models.RemoteResourceRef{}}, - }, - { - name: "expands remotes", - args: args{set: newElasticsearchRemoteSet([]interface{}{ - map[string]interface{}{ - "alias": "alias", - "deployment_id": "someid", - "ref_id": "main-elasticsearch", - "skip_unavailable": true, - }, - map[string]interface{}{ - "deployment_id": "some other id", 
- "ref_id": "main-elasticsearch", - }, - }...)}, - want: &models.RemoteResources{Resources: []*models.RemoteResourceRef{ - { - DeploymentID: ec.String("some other id"), - ElasticsearchRefID: ec.String("main-elasticsearch"), - }, - { - Alias: ec.String("alias"), - DeploymentID: ec.String("someid"), - ElasticsearchRefID: ec.String("main-elasticsearch"), - SkipUnavailable: ec.Bool(true), - }, - }}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := expandRemoteClusters(tt.args.set) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/enterprise_search_expanders.go b/ec/ecresource/deploymentresource/enterprise_search_expanders.go deleted file mode 100644 index 01de017ed..000000000 --- a/ec/ecresource/deploymentresource/enterprise_search_expanders.go +++ /dev/null @@ -1,219 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// expandEssResources expands Enterprise Search resources into their models. 
-func expandEssResources(ess []interface{}, tpl *models.EnterpriseSearchPayload) ([]*models.EnterpriseSearchPayload, error) { - if len(ess) == 0 { - return nil, nil - } - - if tpl == nil { - return nil, errors.New("enterprise_search specified but deployment template is not configured for it. Use a different template if you wish to add enterprise_search") - } - - result := make([]*models.EnterpriseSearchPayload, 0, len(ess)) - for _, raw := range ess { - resResource, err := expandEssResource(raw, tpl) - if err != nil { - return nil, err - } - result = append(result, resResource) - } - - return result, nil -} - -func expandEssResource(raw interface{}, res *models.EnterpriseSearchPayload) (*models.EnterpriseSearchPayload, error) { - ess := raw.(map[string]interface{}) - - if esRefID, ok := ess["elasticsearch_cluster_ref_id"].(string); ok { - res.ElasticsearchClusterRefID = ec.String(esRefID) - } - - if refID, ok := ess["ref_id"].(string); ok { - res.RefID = ec.String(refID) - } - - if version, ok := ess["version"].(string); ok { - res.Plan.EnterpriseSearch.Version = version - } - - if region, ok := ess["region"].(string); ok && region != "" { - res.Region = ec.String(region) - } - - if cfg, ok := ess["config"].([]interface{}); ok { - if err := expandEssConfig(cfg, res.Plan.EnterpriseSearch); err != nil { - return nil, err - } - } - - if rt, ok := ess["topology"].([]interface{}); ok && len(rt) > 0 { - topology, err := expandEssTopology(rt, res.Plan.ClusterTopology) - if err != nil { - return nil, err - } - res.Plan.ClusterTopology = topology - } else { - res.Plan.ClusterTopology = defaultEssTopology(res.Plan.ClusterTopology) - } - - return res, nil -} - -func expandEssTopology(rawTopologies []interface{}, topologies []*models.EnterpriseSearchTopologyElement) ([]*models.EnterpriseSearchTopologyElement, error) { - res := make([]*models.EnterpriseSearchTopologyElement, 0, len(rawTopologies)) - for i, rawTop := range rawTopologies { - topology, ok := 
rawTop.(map[string]interface{}) - if !ok { - continue - } - - var icID string - if id, ok := topology["instance_configuration_id"].(string); ok { - icID = id - } - - // When a topology element is set but no instance_configuration_id - // is set, then obtain the instance_configuration_id from the topology - // element. - if t := defaultEssTopology(topologies); icID == "" && len(t) > i { - icID = t[i].InstanceConfigurationID - } - size, err := util.ParseTopologySize(topology) - if err != nil { - return nil, err - } - - // Since Enterprise Search is not enabled by default in the template, - // if the size == nil, it means that the size hasn't been specified in - // the definition. - if size == nil { - size = &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(minimumEnterpriseSearchSize), - } - } - - elem, err := matchEssTopology(icID, topologies) - if err != nil { - return nil, err - } - if size != nil { - elem.Size = size - } - - if zones, ok := topology["zone_count"].(int); ok && zones > 0 { - elem.ZoneCount = int32(zones) - } - - res = append(res, elem) - } - - return res, nil -} - -func expandEssConfig(raw []interface{}, res *models.EnterpriseSearchConfiguration) error { - for _, rawCfg := range raw { - cfg, ok := rawCfg.(map[string]interface{}) - if !ok { - continue - } - - if settings, ok := cfg["user_settings_json"].(string); ok && settings != "" { - if err := json.Unmarshal([]byte(settings), &res.UserSettingsJSON); err != nil { - return fmt.Errorf("failed expanding enterprise_search user_settings_json: %w", err) - } - } - if settings, ok := cfg["user_settings_override_json"].(string); ok && settings != "" { - if err := json.Unmarshal([]byte(settings), &res.UserSettingsOverrideJSON); err != nil { - return fmt.Errorf("failed expanding enterprise_search user_settings_override_json: %w", err) - } - } - if settings, ok := cfg["user_settings_yaml"].(string); ok && settings != "" { - res.UserSettingsYaml = settings - } - if settings, ok := 
cfg["user_settings_override_yaml"].(string); ok && settings != "" { - res.UserSettingsOverrideYaml = settings - } - - if v, ok := cfg["docker_image"].(string); ok { - res.DockerImage = v - } - } - - return nil -} - -// defaultApmTopology iterates over all the templated topology elements and -// sets the size to the default when the template size is smaller than the -// deployment template default, the same is done on the ZoneCount. -func defaultEssTopology(topology []*models.EnterpriseSearchTopologyElement) []*models.EnterpriseSearchTopologyElement { - for _, t := range topology { - if *t.Size.Value < minimumEnterpriseSearchSize || *t.Size.Value == 0 { - t.Size.Value = ec.Int32(minimumEnterpriseSearchSize) - } - if t.ZoneCount < minimumZoneCount { - t.ZoneCount = minimumZoneCount - } - } - - return topology -} - -func matchEssTopology(id string, topologies []*models.EnterpriseSearchTopologyElement) (*models.EnterpriseSearchTopologyElement, error) { - for _, t := range topologies { - if t.InstanceConfigurationID == id { - return t, nil - } - } - return nil, fmt.Errorf( - `enterprise_search topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, - id, - ) -} - -// essResource returns the EnterpriseSearchPayload from a deployment -// template or an empty version of the payload. -func essResource(res *models.DeploymentTemplateInfoV2) *models.EnterpriseSearchPayload { - if len(res.DeploymentTemplate.Resources.EnterpriseSearch) == 0 { - return nil - } - return res.DeploymentTemplate.Resources.EnterpriseSearch[0] -} - -// essResourceFromUpdate returns the EnterpriseSearchPayload from a deployment -// update request or an empty version of the payload. 
-func essResourceFromUpdate(res *models.DeploymentUpdateResources) *models.EnterpriseSearchPayload { - if len(res.EnterpriseSearch) == 0 { - return nil - } - return res.EnterpriseSearch[0] -} diff --git a/ec/ecresource/deploymentresource/enterprise_search_expanders_test.go b/ec/ecresource/deploymentresource/enterprise_search_expanders_test.go deleted file mode 100644 index 021d50dd0..000000000 --- a/ec/ecresource/deploymentresource/enterprise_search_expanders_test.go +++ /dev/null @@ -1,421 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "errors" - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" -) - -func Test_expandEssResources(t *testing.T) { - tpl := func() *models.EnterpriseSearchPayload { - return essResource(parseDeploymentTemplate(t, - "testdata/template-aws-io-optimized-v2.json", - )) - } - type args struct { - ess []interface{} - tpl *models.EnterpriseSearchPayload - } - tests := []struct { - name string - args args - want []*models.EnterpriseSearchPayload - err error - }{ - { - name: "returns nil when there's no resources", - }, - { - name: "parses an enterprise_search resource with explicit topology", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - "size": "2g", - "zone_count": 1, - }}, - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: "parses an enterprise_search resource with no topology takes the minimum size", - args: args{ - tpl: tpl(), - ess: 
[]interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: "parses an enterprise_search resource with topology but no instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "size": "4g", - }}, - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, 
- }, - }}, - }, - { - name: "parses an enterprise_search resource with multiple topologies but no instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{ - map[string]interface{}{ - "size": "4g", - }, map[string]interface{}{ - "size": "4g", - }, - }, - }}, - }, - err: errors.New("enterprise_search topology: invalid instance_configuration_id: \"\" doesn't match any of the deployment template instance configurations"), - }, - { - name: "parses an enterprise_search resource with topology but instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - }}, - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: "parses an enterprise_search resource with topology and zone_count", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": 
"main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "zone_count": 1, - }}, - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: "parses an enterprise_search resource with explicit topology and config", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "secondary-enterprise_search", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: override", - "user_settings_json": `{"some.setting":"value"}`, - "user_settings_override_json": `{"some.setting":"override"}`, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - "size": "4g", - "zone_count": 1, - "node_type_appserver": true, - "node_type_connector": true, - "node_type_worker": true, - }}, - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("secondary-enterprise_search"), - Plan: 
&models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - UserSettingsYaml: "some.setting: value", - UserSettingsOverrideYaml: "some.setting: override", - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "override", - }, - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: "parses an enterprise_search resource with explicit nils", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "secondary-enterprise_search", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": nil, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": nil, - "user_settings_override_yaml": nil, - "user_settings_json": nil, - "user_settings_override_json": nil, - }}, - "topology": nil, - }}, - }, - want: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("us-east-1"), - RefID: ec.String("secondary-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }}, - }, - }}, - }, - { - name: "parses an 
enterprise_search resource with invalid instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5", - "size": "2g", - "zone_count": 1, - }}, - }}, - }, - err: errors.New(`enterprise_search topology: invalid instance_configuration_id: "aws.enterprisesearch.m5" doesn't match any of the deployment template instance configurations`), - }, - { - name: "tries to parse an enterprise_search resource when the template doesn't have an Enterprise Search instance set.", - args: args{ - tpl: nil, - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-enterprise_search", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "version": "7.8.0", - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\": \"value\"}", - "user_settings_override_json": "{\"some.setting\": \"value2\"}", - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprise_search.r5d", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - err: errors.New("enterprise_search specified but deployment template is not configured for it. 
Use a different template if you wish to add enterprise_search"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := expandEssResources(tt.args.ess, tt.args.tpl) - if !assert.Equal(t, tt.err, err) { - t.Error(err) - } - - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/enterprise_search_flatteners.go b/ec/ecresource/deploymentresource/enterprise_search_flatteners.go deleted file mode 100644 index cc2560724..000000000 --- a/ec/ecresource/deploymentresource/enterprise_search_flatteners.go +++ /dev/null @@ -1,149 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "bytes" - "encoding/json" - - "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenEssResources flattens Enterprise Search resources into its flattened structure. 
-func flattenEssResources(in []*models.EnterpriseSearchResourceInfo, name string) []interface{} { - result := make([]interface{}, 0, len(in)) - for _, res := range in { - m := make(map[string]interface{}) - if util.IsCurrentEssPlanEmpty(res) || isEssResourceStopped(res) { - continue - } - - if res.RefID != nil && *res.RefID != "" { - m["ref_id"] = *res.RefID - } - - if res.Info.ID != nil && *res.Info.ID != "" { - m["resource_id"] = *res.Info.ID - } - - if res.Region != nil { - m["region"] = *res.Region - } - - plan := res.Info.PlanInfo.Current.Plan - if topology := flattenEssTopology(plan); len(topology) > 0 { - m["topology"] = topology - } - - if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID - } - - if urls := util.FlattenClusterEndpoint(res.Info.Metadata); len(urls) > 0 { - for k, v := range urls { - m[k] = v - } - } - - if c := flattenEssConfig(plan.EnterpriseSearch); len(c) > 0 { - m["config"] = c - } - - result = append(result, m) - } - - return result -} - -func flattenEssTopology(plan *models.EnterpriseSearchPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) - for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) - if topology.Size == nil || topology.Size.Value == nil || *topology.Size.Value == 0 { - continue - } - - if topology.InstanceConfigurationID != "" { - m["instance_configuration_id"] = topology.InstanceConfigurationID - } - - if topology.Size != nil { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource - } - - if nt := topology.NodeType; nt != nil { - if nt.Appserver != nil { - m["node_type_appserver"] = *nt.Appserver - } - - if nt.Connector != nil { - m["node_type_connector"] = *nt.Connector - } - - if nt.Worker != nil { - m["node_type_worker"] = *nt.Worker - } - } - - m["zone_count"] = topology.ZoneCount - - result = append(result, m) - } - - return result -} - -func 
flattenEssConfig(cfg *models.EnterpriseSearchConfiguration) []interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.UserSettingsYaml != "" { - m["user_settings_yaml"] = cfg.UserSettingsYaml - } - - if cfg.UserSettingsOverrideYaml != "" { - m["user_settings_override_yaml"] = cfg.UserSettingsOverrideYaml - } - - if o := cfg.UserSettingsJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_json"] = string(b) - } - } - - if o := cfg.UserSettingsOverrideJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_override_json"] = string(b) - } - } - - if cfg.DockerImage != "" { - m["docker_image"] = cfg.DockerImage - } - - if len(m) == 0 { - return nil - } - - return []interface{}{m} -} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search.go b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search.go new file mode 100644 index 000000000..1e54d83d9 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search.go @@ -0,0 +1,46 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type EnterpriseSearchTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.List `tfsdk:"topology"` + Config types.List `tfsdk:"config"` +} + +type EnterpriseSearch struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology EnterpriseSearchTopologies `tfsdk:"topology"` + Config EnterpriseSearchConfigs `tfsdk:"config"` +} + +type EnterpriseSearches []EnterpriseSearch diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_config.go b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_config.go new file mode 100644 index 000000000..a618e4270 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_config.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type EnterpriseSearchConfigTF struct { + DockerImage types.String `tfsdk:"docker_image"` + UserSettingsJson types.String `tfsdk:"user_settings_json"` + UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"` + UserSettingsYaml types.String `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"` +} + +type EnterpriseSearchConfig struct { + DockerImage *string `tfsdk:"docker_image"` + UserSettingsJson *string `tfsdk:"user_settings_json"` + UserSettingsOverrideJson *string `tfsdk:"user_settings_override_json"` + UserSettingsYaml *string `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml *string `tfsdk:"user_settings_override_yaml"` +} + +type EnterpriseSearchConfigs []EnterpriseSearchConfig diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_topology.go b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_topology.go new file mode 100644 index 000000000..f6b39b2cc --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v1/enterprise_search_topology.go @@ -0,0 +1,44 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type EnterpriseSearchTopologyTF struct { + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + NodeTypeAppserver types.Bool `tfsdk:"node_type_appserver"` + NodeTypeConnector types.Bool `tfsdk:"node_type_connector"` + NodeTypeWorker types.Bool `tfsdk:"node_type_worker"` +} + +type EnterpriseSearchTopology struct { + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + NodeTypeAppserver *bool `tfsdk:"node_type_appserver"` + NodeTypeConnector *bool `tfsdk:"node_type_connector"` + NodeTypeWorker *bool `tfsdk:"node_type_worker"` +} + +type EnterpriseSearchTopologies []EnterpriseSearchTopology diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go b/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go new file mode 100644 index 000000000..112d40cf5 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v1/schema.go @@ -0,0 +1,176 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func EnterpriseSearchSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Enterprise Search resource definition", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + }, + }, + "ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-enterprise_search"}), + }, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "region": { + Type: types.StringType, 
+ Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "topology": { + Description: "Optional topology attribute", + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + }, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_appserver": { + Type: types.BoolType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_connector": { + Type: types.BoolType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_worker": { + Type: types.BoolType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + }), + }, + "config": { + Description: `Optionally define the 
Enterprise Search configuration options for the Enterprise Search Server`, + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Enterprise Search nodes will use. Note that this field will only work for internal users only.", + Optional: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_config.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_config.go new file mode 100644 index 000000000..5bfaacd32 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_config.go @@ -0,0 +1,96 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +type EnterpriseSearchConfig v1.EnterpriseSearchConfig + +func readEnterpriseSearchConfig(in *models.EnterpriseSearchConfiguration) (*EnterpriseSearchConfig, error) { + var cfg EnterpriseSearchConfig + + if in == nil { + return nil, nil + } + + if in.UserSettingsYaml != "" { + cfg.UserSettingsYaml = &in.UserSettingsYaml + } + + if in.UserSettingsOverrideYaml != "" { + cfg.UserSettingsOverrideYaml = &in.UserSettingsOverrideYaml + } + + if o := in.UserSettingsJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsJson = ec.String(string(b)) + } + } + + if o := in.UserSettingsOverrideJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsOverrideJson = ec.String(string(b)) + } + } + + if in.DockerImage != "" { + cfg.DockerImage = &in.DockerImage + } + + if cfg == (EnterpriseSearchConfig{}) { + return nil, nil + } + + return &cfg, nil +} + +func enterpriseSearchConfigPayload(ctx context.Context, cfg v1.EnterpriseSearchConfigTF, res *models.EnterpriseSearchConfiguration) diag.Diagnostics { + var diags diag.Diagnostics + + if cfg.UserSettingsJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsJson.Value), &res.UserSettingsJSON); err != nil { + diags.AddError("failed expanding enterprise_search user_settings_json", err.Error()) + } + } + if cfg.UserSettingsOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsOverrideJson.Value), &res.UserSettingsOverrideJSON); err != nil { + diags.AddError("failed expanding enterprise_search user_settings_override_json", err.Error()) + } + } + if 
!cfg.UserSettingsYaml.IsNull() { + res.UserSettingsYaml = cfg.UserSettingsYaml.Value + } + if !cfg.UserSettingsOverrideYaml.IsNull() { + res.UserSettingsOverrideYaml = cfg.UserSettingsOverrideYaml.Value + } + + if !cfg.DockerImage.IsNull() { + res.DockerImage = cfg.DockerImage.Value + } + + return diags +} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go new file mode 100644 index 000000000..3d2aa556c --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload.go @@ -0,0 +1,134 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type EnterpriseSearchTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + NodeTypeAppserver types.Bool `tfsdk:"node_type_appserver"` + NodeTypeConnector types.Bool `tfsdk:"node_type_connector"` + NodeTypeWorker types.Bool `tfsdk:"node_type_worker"` + Config types.Object `tfsdk:"config"` +} + +func (es *EnterpriseSearchTF) payload(ctx context.Context, payload models.EnterpriseSearchPayload) (*models.EnterpriseSearchPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + if !es.ElasticsearchClusterRefId.IsNull() { + payload.ElasticsearchClusterRefID = &es.ElasticsearchClusterRefId.Value + } + + if !es.RefId.IsNull() { + payload.RefID = &es.RefId.Value + } + + if es.Region.Value != "" { + payload.Region = &es.Region.Value + } + + if !es.Config.IsNull() && !es.Config.IsUnknown() { + var config *v1.EnterpriseSearchConfigTF + + ds := tfsdk.ValueAs(ctx, es.Config, &config) + + diags.Append(ds...) + + if !ds.HasError() && config != nil { + diags.Append(enterpriseSearchConfigPayload(ctx, *config, payload.Plan.EnterpriseSearch)...) 
+ } + } + + topologyTF := v1.EnterpriseSearchTopologyTF{ + InstanceConfigurationId: es.InstanceConfigurationId, + Size: es.Size, + SizeResource: es.SizeResource, + ZoneCount: es.ZoneCount, + NodeTypeAppserver: es.NodeTypeAppserver, + NodeTypeConnector: es.NodeTypeConnector, + NodeTypeWorker: es.NodeTypeWorker, + } + + topology, ds := enterpriseSearchTopologyPayload(ctx, topologyTF, defaultTopology(payload.Plan.ClusterTopology), 0) + + diags = append(diags, ds...) + + if topology != nil { + payload.Plan.ClusterTopology = []*models.EnterpriseSearchTopologyElement{topology} + } + + return &payload, diags +} + +func EnterpriseSearchesPayload(ctx context.Context, esObj types.Object, template *models.DeploymentTemplateInfoV2) (*models.EnterpriseSearchPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + var es *EnterpriseSearchTF + + if diags = tfsdk.ValueAs(ctx, esObj, &es); diags.HasError() { + return nil, diags + } + + if es == nil { + return nil, nil + } + + templatePayload := payloadFromTemplate(template) + + if templatePayload == nil { + diags.AddError( + "enterprise_search payload error", + "enterprise_search specified but deployment template is not configured for it. Use a different template if you wish to add enterprise_search", + ) + return nil, diags + } + + payload, diags := es.payload(ctx, *templatePayload) + + if diags.HasError() { + return nil, diags + } + + return payload, nil +} + +// payloadFromTemplate returns the EnterpriseSearchPayload from a deployment +// template or an empty version of the payload. 
+func payloadFromTemplate(template *models.DeploymentTemplateInfoV2) *models.EnterpriseSearchPayload { + if template == nil || len(template.DeploymentTemplate.Resources.EnterpriseSearch) == 0 { + return nil + } + return template.DeploymentTemplate.Resources.EnterpriseSearch[0] +} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload_test.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload_test.go new file mode 100644 index 000000000..602c908f8 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_payload_test.go @@ -0,0 +1,338 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" +) + +func Test_enterpriseSearchPayload(t *testing.T) { + type args struct { + es *EnterpriseSearch + template *models.DeploymentTemplateInfoV2 + } + tests := []struct { + name string + args args + want *models.EnterpriseSearchPayload + diags diag.Diagnostics + }{ + { + name: "returns nil when there's no resources", + }, + { + name: "parses an enterprise_search resource with explicit topology", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("2g"), + ZoneCount: 1, + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }, + }, + }, + }, + }, + { 
+ name: "parses an enterprise_search resource with no topology takes the minimum size", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }}, + }, + }, + }, + { + name: "parses an enterprise_search resource with topology but no instance_configuration_id", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Size: ec.String("4g"), + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: 
ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }}, + }, + }, + }, + { + name: "parses an enterprise_search resource with topology but instance_configuration_id", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + ZoneCount: 2, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }}, + }, + }, + }, + { + name: "parses an enterprise_search resource with topology and zone_count", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + ZoneCount: 1, + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + 
EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }}, + }, + }, + }, + { + name: "parses an enterprise_search resource with explicit topology and config", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("secondary-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Config: &EnterpriseSearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: override"), + UserSettingsJson: ec.String(`{"some.setting":"value"}`), + UserSettingsOverrideJson: ec.String(`{"some.setting":"override"}`), + }, + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("4g"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + NodeTypeConnector: ec.Bool(true), + NodeTypeWorker: ec.Bool(true), + }, + }, + want: &models.EnterpriseSearchPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("secondary-enterprise_search"), + Plan: &models.EnterpriseSearchPlan{ + EnterpriseSearch: &models.EnterpriseSearchConfiguration{ + UserSettingsYaml: "some.setting: value", + UserSettingsOverrideYaml: "some.setting: override", + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "override", + }, + }, + ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ + ZoneCount: 1, + 
InstanceConfigurationID: "aws.enterprisesearch.m5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + NodeType: &models.EnterpriseSearchNodeTypes{ + Appserver: ec.Bool(true), + Connector: ec.Bool(true), + Worker: ec.Bool(true), + }, + }}, + }, + }, + }, + { + name: "parses an enterprise_search resource with invalid instance_configuration_id", + args: args{ + template: testutil.ParseDeploymentTemplate(t, "../../testdata/template-aws-io-optimized-v2.json"), + es: &EnterpriseSearch{ + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5"), + Size: ec.String("2g"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("cannot match enterprise search topology", `invalid instance_configuration_id: "aws.enterprisesearch.m5" doesn't match any of the deployment template instance configurations`) + return diags + }(), + }, + { + name: "tries to parse an enterprise_search resource when the template doesn't have an Enterprise Search instance set.", + args: args{ + template: nil, + es: &EnterpriseSearch{ + RefId: ec.String("tertiary-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Config: &EnterpriseSearchConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String(`{"some.setting": "value"}`), + UserSettingsOverrideJson: ec.String(`{"some.setting": "value2"}`), + }, + InstanceConfigurationId: ec.String("aws.enterprisesearch.m5d"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + 
diags.AddError("enterprise_search payload error", `enterprise_search specified but deployment template is not configured for it. Use a different template if you wish to add enterprise_search`) + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var ess types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.es, EnterpriseSearchSchema().FrameworkType(), &ess) + assert.Nil(t, diags) + + got, diags := EnterpriseSearchesPayload(context.Background(), ess, tt.args.template) + if tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } + + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go new file mode 100644 index 000000000..913f72bef --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read.go @@ -0,0 +1,110 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +type EnterpriseSearch struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + NodeTypeAppserver *bool `tfsdk:"node_type_appserver"` + NodeTypeConnector *bool `tfsdk:"node_type_connector"` + NodeTypeWorker *bool `tfsdk:"node_type_worker"` + Config *EnterpriseSearchConfig `tfsdk:"config"` +} + +type EnterpriseSearches []EnterpriseSearch + +func ReadEnterpriseSearch(in *models.EnterpriseSearchResourceInfo) (*EnterpriseSearch, error) { + if util.IsCurrentEssPlanEmpty(in) || IsEnterpriseSearchStopped(in) { + return nil, nil + } + + var ess EnterpriseSearch + + ess.RefId = in.RefID + + ess.ResourceId = in.Info.ID + + ess.Region = in.Region + + plan := in.Info.PlanInfo.Current.Plan + + topologies, err := readEnterpriseSearchTopologies(plan.ClusterTopology) + + if err != nil { + return nil, err + } + + if len(topologies) > 0 { + ess.InstanceConfigurationId = topologies[0].InstanceConfigurationId + ess.Size = topologies[0].Size + ess.SizeResource = topologies[0].SizeResource + ess.ZoneCount = topologies[0].ZoneCount + ess.NodeTypeAppserver = topologies[0].NodeTypeAppserver + ess.NodeTypeConnector = topologies[0].NodeTypeConnector + ess.NodeTypeWorker = topologies[0].NodeTypeWorker + } + + ess.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + ess.HttpEndpoint, ess.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + cfg, err := 
readEnterpriseSearchConfig(plan.EnterpriseSearch) + if err != nil { + return nil, err + } + ess.Config = cfg + + return &ess, nil +} + +func ReadEnterpriseSearches(in []*models.EnterpriseSearchResourceInfo) (*EnterpriseSearch, error) { + for _, model := range in { + if util.IsCurrentEssPlanEmpty(model) || IsEnterpriseSearchStopped(model) { + continue + } + + es, err := ReadEnterpriseSearch(model) + if err != nil { + return nil, err + } + + return es, nil + } + + return nil, nil +} + +// IsEnterpriseSearchStopped returns true if the resource is stopped. +func IsEnterpriseSearchStopped(res *models.EnterpriseSearchResourceInfo) bool { + return res == nil || res.Info == nil || res.Info.Status == nil || + *res.Info.Status == "stopped" +} diff --git a/ec/ecresource/deploymentresource/enterprise_search_flatteners_test.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go similarity index 69% rename from ec/ecresource/deploymentresource/enterprise_search_flatteners_test.go rename to ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go index eb70eb6c7..d67b70935 100644 --- a/ec/ecresource/deploymentresource/enterprise_search_flatteners_test.go +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_read_test.go @@ -15,31 +15,34 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) -func Test_flattenEssResource(t *testing.T) { +func Test_readEnterpriseSearch(t *testing.T) { type args struct { - in []*models.EnterpriseSearchResourceInfo - name string + in []*models.EnterpriseSearchResourceInfo } tests := []struct { name string args args - want []interface{} + want *EnterpriseSearch }{ { name: "empty resource list returns empty list", args: args{in: []*models.EnterpriseSearchResourceInfo{}}, - want: []interface{}{}, + want: nil, }, { name: "empty current plan returns empty list", @@ -52,7 +55,7 @@ func Test_flattenEssResource(t *testing.T) { }, }, }}, - want: []interface{}{}, + want: nil, }, { name: "parses the enterprisesearch resource", @@ -154,35 +157,68 @@ func Test_flattenEssResource(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://enterprisesearchresource.cloud.elastic.co:9200", - "https_endpoint": "https://enterprisesearchresource.cloud.elastic.co:9243", - "config": []interface{}{map[string]interface{}{ - "user_settings_json": "{\"some.setting\":\"some other value\"}", - "user_settings_override_json": "{\"some.setting\":\"some other override\"}", - "user_settings_override_yaml": "some.setting: some override", - "user_settings_yaml": "some.setting: some value", - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.r4", - "size": "1g", - "size_resource": "memory", - 
"zone_count": int32(1), - "node_type_appserver": true, - "node_type_worker": false, - }}, + want: &EnterpriseSearch{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-enterprise_search"), + ResourceId: ec.String(mock.ValidClusterID), + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://enterprisesearchresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://enterprisesearchresource.cloud.elastic.co:9243"), + Config: &EnterpriseSearchConfig{ + UserSettingsJson: ec.String("{\"some.setting\":\"some other value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"some other override\"}"), + UserSettingsOverrideYaml: ec.String("some.setting: some override"), + UserSettingsYaml: ec.String("some.setting: some value"), }, + InstanceConfigurationId: ec.String("aws.enterprisesearch.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + NodeTypeAppserver: ec.Bool(true), + NodeTypeWorker: ec.Bool(false), }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenEssResources(tt.args.in, tt.args.name) + got, err := ReadEnterpriseSearches(tt.args.in) + assert.Nil(t, err) + assert.Equal(t, tt.want, got) + + var obj types.Object + diags := tfsdk.ValueFrom(context.Background(), got, EnterpriseSearchSchema().FrameworkType(), &obj) + assert.Nil(t, diags) + }) + } +} + +func Test_IsEnterpriseSearchStopped(t *testing.T) { + type args struct { + res *models.EnterpriseSearchResourceInfo + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "started resource returns false", + args: args{res: &models.EnterpriseSearchResourceInfo{Info: &models.EnterpriseSearchInfo{ + Status: ec.String("started"), + }}}, + want: false, + }, + { + name: "stopped resource returns true", + args: args{res: &models.EnterpriseSearchResourceInfo{Info: &models.EnterpriseSearchInfo{ + Status: ec.String("stopped"), + }}}, + want: true, + }, + } + for _, tt 
:= range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsEnterpriseSearchStopped(tt.args.res) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go new file mode 100644 index 000000000..35f6e846d --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/enterprise_search_topology.go @@ -0,0 +1,160 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v1" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +const ( + minimumEnterpriseSearchSize = 2048 +) + +type enterpriseSearchTopologies v1.EnterpriseSearchTopologies + +func readEnterpriseSearchTopology(in *models.EnterpriseSearchTopologyElement) (*v1.EnterpriseSearchTopology, error) { + var topology v1.EnterpriseSearchTopology + + topology.InstanceConfigurationId = ec.String(in.InstanceConfigurationID) + + if in.Size != nil { + topology.Size = ec.String(util.MemoryToState(*in.Size.Value)) + topology.SizeResource = in.Size.Resource + } + + if nt := in.NodeType; nt != nil { + if nt.Appserver != nil { + topology.NodeTypeAppserver = nt.Appserver + } + + if nt.Connector != nil { + topology.NodeTypeConnector = nt.Connector + } + + if nt.Worker != nil { + topology.NodeTypeWorker = nt.Worker + } + } + + topology.ZoneCount = int(in.ZoneCount) + + return &topology, nil +} + +func readEnterpriseSearchTopologies(in []*models.EnterpriseSearchTopologyElement) (enterpriseSearchTopologies, error) { + if len(in) == 0 { + return nil, nil + } + + topologies := make(enterpriseSearchTopologies, 0, len(in)) + for _, model := range in { + if model.Size == nil || model.Size.Value == nil || *model.Size.Value == 0 { + continue + } + + topology, err := readEnterpriseSearchTopology(model) + if err != nil { + return nil, err + } + + topologies = append(topologies, *topology) + } + + return topologies, nil +} + +func enterpriseSearchTopologyPayload(ctx context.Context, topology v1.EnterpriseSearchTopologyTF, planModels 
[]*models.EnterpriseSearchTopologyElement, index int) (*models.EnterpriseSearchTopologyElement, diag.Diagnostics) { + var diags diag.Diagnostics + + icID := topology.InstanceConfigurationId.Value + + // When a topology element is set but no instance_configuration_id + // is set, then obtain the instance_configuration_id from the topology + // element. + if icID == "" && index < len(planModels) { + icID = planModels[index].InstanceConfigurationID + } + + elem, err := matchTopology(icID, planModels) + if err != nil { + diags.AddError("cannot match enterprise search topology", err.Error()) + return nil, diags + } + + size, err := converters.ParseTopologySizeTypes(topology.Size, topology.SizeResource) + + if err != nil { + diags.AddError("failed parse enterprise search topology size", err.Error()) + return nil, diags + } + + // Since Enterprise Search is not enabled by default in the template, + // if the size == nil, it means that the size hasn't been specified in + // the definition. + if size == nil { + size = &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(minimumEnterpriseSearchSize), + } + } + + elem.Size = size + + if topology.ZoneCount.Value > 0 { + elem.ZoneCount = int32(topology.ZoneCount.Value) + } + + return elem, nil +} + +// defaultTopology iterates over all the templated topology elements and +// sets the size to the default when the template size is smaller than the +// deployment template default, the same is done on the ZoneCount. 
+func defaultTopology(topology []*models.EnterpriseSearchTopologyElement) []*models.EnterpriseSearchTopologyElement { + for _, t := range topology { + if *t.Size.Value < minimumEnterpriseSearchSize || *t.Size.Value == 0 { + t.Size.Value = ec.Int32(minimumEnterpriseSearchSize) + } + if t.ZoneCount < utils.MinimumZoneCount { + t.ZoneCount = utils.MinimumZoneCount + } + } + + return topology +} + +func matchTopology(id string, topologies []*models.EnterpriseSearchTopologyElement) (*models.EnterpriseSearchTopologyElement, error) { + for _, t := range topologies { + if t.InstanceConfigurationID == id { + return t, nil + } + } + return nil, fmt.Errorf( + `invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, + id, + ) +} diff --git a/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go b/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go new file mode 100644 index 000000000..4b71e8143 --- /dev/null +++ b/ec/ecresource/deploymentresource/enterprisesearch/v2/schema.go @@ -0,0 +1,163 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func EnterpriseSearchSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Enterprise Search resource definition", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + }, + }, + "ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-enterprise_search"}), + }, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, 
defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + }, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_appserver": { + Type: types.BoolType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_connector": { + Type: types.BoolType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "node_type_worker": { + Type: types.BoolType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "config": { + Description: `Optionally define the Enterprise Search configuration options for the Enterprise Search Server`, + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Enterprise Search nodes will use. Note that this field will only work for internal users only.", + Optional: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/expanders.go b/ec/ecresource/deploymentresource/expanders.go deleted file mode 100644 index 2b28069ac..000000000 --- a/ec/ecresource/deploymentresource/expanders.go +++ /dev/null @@ -1,431 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "fmt" - "sort" - - semver "github.com/blang/semver/v4" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deptemplateapi" - "github.com/elastic/cloud-sdk-go/pkg/client/deployments" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -var ( - dataTiersVersion = semver.MustParse("7.10.0") -) - -func createResourceToModel(d *schema.ResourceData, client *api.API) (*models.DeploymentCreateRequest, error) { - var result = models.DeploymentCreateRequest{ - Name: d.Get("name").(string), - Alias: d.Get("alias").(string), - Resources: &models.DeploymentCreateResources{}, - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{}, - } - - dtID := d.Get("deployment_template_id").(string) - version := d.Get("version").(string) - template, err := deptemplateapi.Get(deptemplateapi.GetParams{ - API: client, - TemplateID: dtID, - Region: d.Get("region").(string), - HideInstanceConfigurations: true, - }) - if err != nil { - return nil, err - } - - useNodeRoles, err := compatibleWithNodeRoles(version) - if err != nil { - return nil, err - } - - merr := 
multierror.NewPrefixed("invalid configuration") - esRes, err := expandEsResources( - d.Get("elasticsearch").([]interface{}), - enrichElasticsearchTemplate( - esResource(template), dtID, version, useNodeRoles, - ), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Elasticsearch = append(result.Resources.Elasticsearch, esRes...) - - kibanaRes, err := expandKibanaResources( - d.Get("kibana").([]interface{}), kibanaResource(template), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Kibana = append(result.Resources.Kibana, kibanaRes...) - - apmRes, err := expandApmResources( - d.Get("apm").([]interface{}), apmResource(template), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Apm = append(result.Resources.Apm, apmRes...) - - integrationsServerRes, err := expandIntegrationsServerResources( - d.Get("integrations_server").([]interface{}), integrationsServerResource(template), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.IntegrationsServer = append(result.Resources.IntegrationsServer, integrationsServerRes...) - - enterpriseSearchRes, err := expandEssResources( - d.Get("enterprise_search").([]interface{}), essResource(template), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.EnterpriseSearch = append(result.Resources.EnterpriseSearch, enterpriseSearchRes...) 
- - if err := merr.ErrorOrNil(); err != nil { - return nil, err - } - - expandTrafficFilterCreate(d.Get("traffic_filter").(*schema.Set), &result) - - observability, err := expandObservability(d.Get("observability").([]interface{}), client) - if err != nil { - return nil, err - } - result.Settings.Observability = observability - - result.Metadata.Tags = expandTags(d.Get("tags").(map[string]interface{})) - - return &result, nil -} - -func getBaseUpdatePayloads(d *schema.ResourceData, client *api.API) (*models.DeploymentUpdateResources, error) { - prevDtId, dtIdIf := d.GetChange("deployment_template_id") - dtId := dtIdIf.(string) - template, err := deptemplateapi.Get(deptemplateapi.GetParams{ - API: client, - TemplateID: dtId, - Region: d.Get("region").(string), - HideInstanceConfigurations: true, - }) - - if err != nil { - return nil, err - } - - baseUpdatePayloads := &models.DeploymentUpdateResources{ - Apm: template.DeploymentTemplate.Resources.Apm, - Appsearch: template.DeploymentTemplate.Resources.Appsearch, - Elasticsearch: template.DeploymentTemplate.Resources.Elasticsearch, - EnterpriseSearch: template.DeploymentTemplate.Resources.EnterpriseSearch, - IntegrationsServer: template.DeploymentTemplate.Resources.IntegrationsServer, - Kibana: template.DeploymentTemplate.Resources.Kibana, - } - - // If the deployment template has changed then we should use the template migration API - // to build the base update payloads - if d.HasChange("deployment_template_id") && prevDtId.(string) != "" { - // Get an update request from the template migration API - migrateUpdateRequest, err := client.V1API.Deployments.MigrateDeploymentTemplate( - deployments.NewMigrateDeploymentTemplateParams().WithDeploymentID(d.Id()).WithTemplateID(dtId), - client.AuthWriter, - ) - - if err != nil { - return nil, err - } - - if len(migrateUpdateRequest.Payload.Resources.Apm) > 0 { - baseUpdatePayloads.Apm = migrateUpdateRequest.Payload.Resources.Apm - } - - if 
len(migrateUpdateRequest.Payload.Resources.Appsearch) > 0 { - baseUpdatePayloads.Appsearch = migrateUpdateRequest.Payload.Resources.Appsearch - } - - if len(migrateUpdateRequest.Payload.Resources.Elasticsearch) > 0 { - baseUpdatePayloads.Elasticsearch = migrateUpdateRequest.Payload.Resources.Elasticsearch - } - - if len(migrateUpdateRequest.Payload.Resources.EnterpriseSearch) > 0 { - baseUpdatePayloads.EnterpriseSearch = migrateUpdateRequest.Payload.Resources.EnterpriseSearch - } - - if len(migrateUpdateRequest.Payload.Resources.IntegrationsServer) > 0 { - baseUpdatePayloads.IntegrationsServer = migrateUpdateRequest.Payload.Resources.IntegrationsServer - } - - if len(migrateUpdateRequest.Payload.Resources.Kibana) > 0 { - baseUpdatePayloads.Kibana = migrateUpdateRequest.Payload.Resources.Kibana - } - } - - return baseUpdatePayloads, nil -} - -func updateResourceToModel(d *schema.ResourceData, client *api.API) (*models.DeploymentUpdateRequest, error) { - var result = models.DeploymentUpdateRequest{ - Name: d.Get("name").(string), - Alias: d.Get("alias").(string), - PruneOrphans: ec.Bool(true), - Resources: &models.DeploymentUpdateResources{}, - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{}, - } - - updatePayloads, err := getBaseUpdatePayloads(d, client) - if err != nil { - return nil, err - } - - dtID := d.Get("deployment_template_id").(string) - version := d.Get("version").(string) - es := d.Get("elasticsearch").([]interface{}) - kibana := d.Get("kibana").([]interface{}) - apm := d.Get("apm").([]interface{}) - integrationsServer := d.Get("integrations_server").([]interface{}) - enterpriseSearch := d.Get("enterprise_search").([]interface{}) - - prevDT, _ := d.GetChange("deployment_template_id") - if d.HasChange("deployment_template_id") && prevDT != "" { - unsetInstanceConfigurations([][]interface{}{ - es, kibana, apm, integrationsServer, enterpriseSearch, - }) - } - - useNodeRoles, err := 
compatibleWithNodeRoles(version) - if err != nil { - return nil, err - } - convertLegacy, err := legacyToNodeRoles(d) - if err != nil { - return nil, err - } - useNodeRoles = useNodeRoles && convertLegacy - - merr := multierror.NewPrefixed("invalid configuration") - esRes, err := expandEsResources( - es, enrichElasticsearchTemplate( - esResourceFromUpdate(updatePayloads), dtID, version, useNodeRoles, - ), - ) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Elasticsearch = append(result.Resources.Elasticsearch, esRes...) - - // if the restore snapshot operation has been specified, the snapshot restore - // can't be full once the cluster has been created, so the Strategy must be set - // to "partial". - ensurePartialSnapshotStrategy(esRes) - - kibanaRes, err := expandKibanaResources(kibana, kibanaResourceFromUpdate(updatePayloads)) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Kibana = append(result.Resources.Kibana, kibanaRes...) - - apmRes, err := expandApmResources(apm, apmResourceFromUpdate(updatePayloads)) - if err != nil { - merr = merr.Append(err) - } - result.Resources.Apm = append(result.Resources.Apm, apmRes...) - - integrationsServerRes, err := expandIntegrationsServerResources(integrationsServer, integrationsServerResourceFromUpdate(updatePayloads)) - if err != nil { - merr = merr.Append(err) - } - result.Resources.IntegrationsServer = append(result.Resources.IntegrationsServer, integrationsServerRes...) - - enterpriseSearchRes, err := expandEssResources(enterpriseSearch, essResourceFromUpdate(updatePayloads)) - if err != nil { - merr = merr.Append(err) - } - result.Resources.EnterpriseSearch = append(result.Resources.EnterpriseSearch, enterpriseSearchRes...) 
- - if err := merr.ErrorOrNil(); err != nil { - return nil, err - } - - observability, err := expandObservability(d.Get("observability").([]interface{}), client) - if err != nil { - return nil, err - } - result.Settings.Observability = observability - - // In order to stop shipping logs and metrics, an empty Observability - // object must be passed, as opposed to a nil object when creating a - // deployment without observability settings. - if util.ObjectRemoved(d, "observability") { - result.Settings.Observability = &models.DeploymentObservabilitySettings{} - } - - result.Metadata.Tags = expandTags(d.Get("tags").(map[string]interface{})) - - return &result, nil -} - -func unsetInstanceConfigurations(rawResources [][]interface{}) { - for _, resource := range rawResources { - for _, r := range resource { - topologies := r.(map[string]interface{})["topology"].([]interface{}) - for _, topology := range topologies { - delete(topology.(map[string]interface{}), "instance_configuration_id") - } - } - } -} - -func enrichElasticsearchTemplate(tpl *models.ElasticsearchPayload, dt, version string, useNodeRoles bool) *models.ElasticsearchPayload { - if tpl.Plan.DeploymentTemplate == nil { - tpl.Plan.DeploymentTemplate = &models.DeploymentTemplateReference{} - } - - if tpl.Plan.DeploymentTemplate.ID == nil || *tpl.Plan.DeploymentTemplate.ID == "" { - tpl.Plan.DeploymentTemplate.ID = ec.String(dt) - } - - if tpl.Plan.Elasticsearch.Version == "" { - tpl.Plan.Elasticsearch.Version = version - } - - for _, topology := range tpl.Plan.ClusterTopology { - if useNodeRoles { - topology.NodeType = nil - continue - } - topology.NodeRoles = nil - } - - return tpl -} - -func expandTags(raw map[string]interface{}) []*models.MetadataItem { - result := make([]*models.MetadataItem, 0, len(raw)) - for k, v := range raw { - result = append(result, &models.MetadataItem{ - Key: ec.String(k), - Value: ec.String(v.(string)), - }) - } - - // Sort by key - sort.SliceStable(result, func(i, j int) bool { 
- return *result[i].Key < *result[j].Key - }) - - return result -} - -func compatibleWithNodeRoles(version string) (bool, error) { - deploymentVersion, err := semver.Parse(version) - if err != nil { - return false, fmt.Errorf("failed to parse Elasticsearch version: %w", err) - } - - return deploymentVersion.GE(dataTiersVersion), nil -} - -func ensurePartialSnapshotStrategy(ess []*models.ElasticsearchPayload) { - for _, es := range ess { - transient := es.Plan.Transient - if transient == nil || transient.RestoreSnapshot == nil { - continue - } - transient.RestoreSnapshot.Strategy = "partial" - } -} - -// legacyToNodeRoles returns true when the legacy "node_type_*" should be -// migrated over to node_roles. Which will be true when: -// * The version field doesn't change. -// * The version field changes but: -// - The Elasticsearch.0.toplogy doesn't have any node_type_* set. -func legacyToNodeRoles(d *schema.ResourceData) (bool, error) { - if !d.HasChange("version") { - return true, nil - } - - oldVRaw, newVRaw := d.GetChange("version") - oldVS, newVS := oldVRaw.(string), newVRaw.(string) - - // If the previous version is empty, node_roles should be used. - if oldVS == "" { - return true, nil - } - - oldV, err := semver.Parse(oldVS) - if err != nil { - return false, fmt.Errorf("failed to parse previous Elasticsearch version: %w", err) - } - newV, err := semver.Parse(newVS) - if err != nil { - return false, fmt.Errorf("failed to parse previous Elasticsearch version: %w", err) - } - - // if the version change moves from non-node_roles to one - // that supports node roles, do not migrate on that step. - if oldV.LT(dataTiersVersion) && newV.GE(dataTiersVersion) { - return false, nil - } - - // When any topology elements in the state have the node_type_* - // properties set, the node_role field cannot be used, since - // we'd be changing the version AND migrating over `node_role`s - // which is not permitted by the API. 
- var hasNodeTypeSet bool - for _, t := range d.Get("elasticsearch.0.topology").([]interface{}) { - top := t.(map[string]interface{}) - if nt, ok := top["node_type_data"]; ok { - if nt.(string) != "" { - hasNodeTypeSet = true - } - } - if nt, ok := top["node_type_ingest"]; ok { - if nt.(string) != "" { - hasNodeTypeSet = true - } - } - if nt, ok := top["node_type_master"]; ok { - if nt.(string) != "" { - hasNodeTypeSet = true - } - } - if nt, ok := top["node_type_ml"]; ok { - if nt.(string) != "" { - hasNodeTypeSet = true - } - } - } - - if hasNodeTypeSet { - return false, nil - } - - return true, nil -} diff --git a/ec/ecresource/deploymentresource/expanders_test.go b/ec/ecresource/deploymentresource/expanders_test.go deleted file mode 100644 index f5998888f..000000000 --- a/ec/ecresource/deploymentresource/expanders_test.go +++ /dev/null @@ -1,5063 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "bytes" - "errors" - "io" - "os" - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func fileAsResponseBody(t *testing.T, name string) io.ReadCloser { - t.Helper() - f, err := os.Open(name) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - var buf = new(bytes.Buffer) - if _, err := io.Copy(buf, f); err != nil { - t.Fatal(err) - } - buf.WriteString("\n") - - return io.NopCloser(buf) -} - -func Test_createResourceToModel(t *testing.T) { - deploymentRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - deploymentNodeRolesRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeployment(), - Schema: newSchema(), - }) - ioOptimizedTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-io-optimized-v2.json") - } - deploymentOverrideRd := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeploymentOverrides(), - Schema: newSchema(), - }) - deploymentOverrideICRd := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeploymentOverridesIC(), - Schema: newSchema(), - }) - hotWarmTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-hot-warm-v2.json") - } - deploymentHotWarm := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-hot-warm-v2", - "region": 
"us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - }) - - ccsTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-cross-cluster-search-v2.json") - } - deploymentCCS := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - }) - - emptyTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-empty.json") - } - deploymentEmptyTemplate := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "empty-deployment-template", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - "apm": []interface{}{map[string]interface{}{}}, - "enterprise_search": []interface{}{map[string]interface{}{}}, - }, - }) - - deploymentWithTags := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }, - }, - "tags": map[string]interface{}{ - "aaa": "bbb", - "owner": "elastic", - "cost-center": "rnd", - }, - }, - Schema: newSchema(), - }) - - type args struct { - d *schema.ResourceData - client *api.API - } - tests := []struct { - name string - 
args args - want *models.DeploymentCreateRequest - err error - }{ - { - name: "parses the resources", - args: args{ - d: deploymentNodeRolesRD, - client: api.NewMock( - mock.New200Response(hotWarmTpl()), - mock.New200Response( - mock.NewStructBody(models.DeploymentGetResponse{ - Healthy: ec.Bool(true), - ID: ec.String(mock.ValidClusterID), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - ID: ec.String(mock.ValidClusterID), - RefID: ec.String("main-elasticsearch"), - }}, - }, - }), - ), - ), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - Observability: &models.DeploymentObservabilitySettings{ - Logging: &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.11.1", - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - 
UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - NodeRoles: []string{ - "data_content", - "data_hot", - "ingest", - "master", - "remote_cluster_client", - "transform", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "warm"}, - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: 
ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the legacy resources", - args: args{ - d: deploymentRD, - client: api.NewMock( - mock.New200Response(ioOptimizedTpl()), - mock.New200Response( - mock.NewStructBody(models.DeploymentGetResponse{ - Healthy: ec.Bool(true), - ID: ec.String(mock.ValidClusterID), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - ID: ec.String(mock.ValidClusterID), - RefID: ec.String("main-elasticsearch"), - }}, - }, - }), - ), - ), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - 
Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - Observability: &models.DeploymentObservabilitySettings{ - Logging: &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: 
&models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations (IO 
Optimized)", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - "apm": []interface{}{map[string]interface{}{}}, - "enterprise_search": []interface{}{map[string]interface{}{}}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - 
Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources 
with empty declarations (IO Optimized) with node_roles", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.11.0", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - "apm": []interface{}{map[string]interface{}{}}, - "enterprise_search": []interface{}{map[string]interface{}{}}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.11.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ 
- "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - 
}, - }, - }, - { - name: "parses the resources with topology overrides (size)", - args: args{ - d: deploymentOverrideRd, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: 
ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with topology overrides (IC)", - args: args{ - d: deploymentOverrideICRd, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - Settings: &models.DeploymentCreateSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }, - Metadata: 
&models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: 
ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations (Hot Warm)", - args: args{ - d: deploymentHotWarm, - client: api.NewMock(mock.New200Response(hotWarmTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, hotWarmTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Curation: nil, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Curation: nil, - Version: "7.9.2", - }, - DeploymentTemplate: 
&models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(false), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, 
- }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations (Hot Warm) with node_roles", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-hot-warm-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - }), - client: api.NewMock(mock.New200Response(hotWarmTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Curation: nil, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Curation: nil, - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), 
- Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations (Hot Warm) with node_roles and extensions", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-hot-warm-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "extension": []interface{}{ - map[string]interface{}{ - "name": "my-plugin", - "type": "plugin", - "url": "repo://12311234", - "version": "7.7.0", - }, - map[string]interface{}{ - "name": "my-second-plugin", - "type": "plugin", - "url": "repo://12311235", - "version": 
"7.7.0", - }, - map[string]interface{}{ - "name": "my-bundle", - "type": "bundle", - "url": "repo://1231122", - "version": "7.7.0", - }, - map[string]interface{}{ - "name": "my-second-bundle", - "type": "bundle", - "url": "repo://1231123", - "version": "7.7.0", - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(hotWarmTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, hotWarmTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - UserBundles: []*models.ElasticsearchUserBundle{ - { - URL: ec.String("repo://1231122"), - Name: ec.String("my-bundle"), - ElasticsearchVersion: ec.String("7.7.0"), - }, - { - URL: ec.String("repo://1231123"), - Name: ec.String("my-second-bundle"), - ElasticsearchVersion: ec.String("7.7.0"), - }, - }, - UserPlugins: []*models.ElasticsearchUserPlugin{ - { - URL: ec.String("repo://12311235"), - Name: ec.String("my-second-plugin"), - ElasticsearchVersion: ec.String("7.7.0"), - }, - { - URL: ec.String("repo://12311234"), - Name: ec.String("my-plugin"), - ElasticsearchVersion: ec.String("7.7.0"), - }, - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, 
- NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "deployment with autoscaling enabled", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "true", - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: 
&models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(true), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - 
AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "deployment with autoscaling enabled and custom policies set", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "true", - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "232g", - }}, - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "116g", - }}, - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, 
ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(true), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(237568), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: 
ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "deployment with dedicated master and cold tiers", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }, - map[string]interface{}{ - "id": "master", - "size": "1g", - "zone_count": 3, - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - 
Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "master", - ZoneCount: 3, - InstanceConfigurationID: "aws.master.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - NodeRoles: []string{ - "master", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{}, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - }, - { - ID: "warm", - ZoneCount: 2, - 
InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "deployment with dedicated coordinating and cold tiers", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "coordinating", - "size": "2g", - "zone_count": 2, - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: 
&models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "coordinating", - ZoneCount: 2, - InstanceConfigurationID: "aws.coordinating.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "ingest", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{}, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: 
&models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "deployment with dedicated coordinating, master and cold tiers", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "coordinating", - "size": "2g", - "zone_count": 2, - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }, - map[string]interface{}{ - "id": "master", - "size": "1g", - "zone_count": 3, - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), 
&models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "coordinating", - ZoneCount: 2, - InstanceConfigurationID: "aws.coordinating.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "ingest", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{}, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": 
"hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "master", - ZoneCount: 3, - InstanceConfigurationID: "aws.master.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - NodeRoles: []string{ - "master", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{}, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - // - { - name: "deployment with docker_image overrides", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.14.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/elasticsearch/container:7.14.1-hash", - }}, - "autoscale": "false", - "trust_account": []interface{}{ - 
map[string]interface{}{ - "account_id": "ANID", - "trust_all": "true", - }, - }, - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }, - }, - }}, - "kibana": []interface{}{map[string]interface{}{ - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/kibana/container:7.14.1-hash", - }}, - }}, - "apm": []interface{}{map[string]interface{}{ - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/apm/container:7.14.1-hash", - }}, - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/enterprise_search/container:7.14.1-hash", - }}, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Trust: &models.ElasticsearchClusterTrustSettings{ - Accounts: []*models.AccountTrustRelationship{ - { - AccountID: ec.String("ANID"), - TrustAll: ec.Bool(true), - }, - }, - }, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.14.1", - DockerImage: "docker.elastic.com/elasticsearch/container:7.14.1-hash", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 2, - 
InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - Apm: []*models.ApmPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - DockerImage: "docker.elastic.com/apm/container:7.14.1-hash", - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - ZoneCount: 1, - }}, - }, - RefID: ec.String("main-apm"), - Region: ec.String("us-east-1"), - }}, - Kibana: []*models.KibanaPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - DockerImage: "docker.elastic.com/kibana/container:7.14.1-hash", - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{{ - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - ZoneCount: 1, - }}, - }, - RefID: ec.String("main-kibana"), - Region: ec.String("us-east-1"), - }}, - EnterpriseSearch: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - 
DockerImage: "docker.elastic.com/enterprise_search/container:7.14.1-hash", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - ZoneCount: 2, - }}, - }, - RefID: ec.String("main-enterprise_search"), - Region: ec.String("us-east-1"), - }}, - }, - }, - }, - { - name: "deployment with trust settings set", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "trust_account": []interface{}{ - map[string]interface{}{ - "account_id": "ANID", - "trust_all": "true", - }, - map[string]interface{}{ - "account_id": "anotherID", - "trust_all": "false", - "trust_allowlist": []interface{}{ - "abc", "hij", "dfg", - }, - }, - }, - "trust_external": []interface{}{ - map[string]interface{}{ - "relationship_id": "external_id", - "trust_all": "true", - }, - map[string]interface{}{ - "relationship_id": "another_external_id", - "trust_all": "false", - "trust_allowlist": []interface{}{ - "abc", "dfg", - }, - }, - }, - "topology": []interface{}{ - map[string]interface{}{ - "id": "cold", - "size": "2g", - }, - map[string]interface{}{ - "id": "hot_content", - "size": "8g", - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "232g", - }}, - }, - map[string]interface{}{ - "id": "warm", - "size": "4g", - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "116g", - }}, - }, - }, - }}, - }, - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: 
&models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Trust: &models.ElasticsearchClusterTrustSettings{ - Accounts: []*models.AccountTrustRelationship{ - { - AccountID: ec.String("ANID"), - TrustAll: ec.Bool(true), - }, - { - AccountID: ec.String("anotherID"), - TrustAll: ec.Bool(false), - TrustAllowlist: []string{ - "abc", "dfg", "hij", - }, - }, - }, - External: []*models.ExternalTrustRelationship{ - { - TrustRelationshipID: ec.String("external_id"), - TrustAll: ec.Bool(true), - }, - { - TrustRelationshipID: ec.String("another_external_id"), - TrustAll: ec.Bool(false), - TrustAllowlist: []string{ - "abc", "dfg", - }, - }, - }, - }, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "cold", - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeRoles: []string{ - "data_cold", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "cold", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: 
&models.TopologySize{ - Value: ec.Int32(59392), - Resource: ec.String("memory"), - }, - }, - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(237568), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{ - "data": "warm", - }, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "parses the resources with empty declarations (Cross Cluster Search)", - args: args{ - d: deploymentCCS, - client: api.NewMock(mock.New200Response(ccsTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ccsTpl(), false), &models.ElasticsearchPayload{ - Region: 
ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{}, - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-cross-cluster-search-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 1, - InstanceConfigurationID: "aws.ccs.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with tags", - args: args{ - d: deploymentWithTags, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{Tags: []*models.MetadataItem{ - {Key: ec.String("aaa"), Value: ec.String("bbb")}, - {Key: ec.String("cost-center"), Value: ec.String("rnd")}, - {Key: ec.String("owner"), Value: ec.String("elastic")}, - }}, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: 
enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "handles a snapshot_source block, leaving the strategy as is", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "version": "7.10.1", - "snapshot_source": []interface{}{map[string]interface{}{ - "source_elasticsearch_cluster_id": "8c63b87af9e24ea49b8a4bfe550e5fe9", - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }}, - }, - Schema: newSchema(), - }), - client: 
api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentCreateRequest{ - Name: "my_deployment_name", - Settings: &models.DeploymentCreateSettings{}, - Metadata: &models.DeploymentCreateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentCreateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{ - RestoreSnapshot: &models.RestoreSnapshotConfiguration{ - SourceClusterID: "8c63b87af9e24ea49b8a4bfe550e5fe9", - SnapshotName: ec.String("__latest_success__"), - }, - }, - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - // This case we're using an empty deployment_template to ensure that - // resources not present in the template cannot be expanded, 
receiving - // an error instead. - { - name: "parses the resources with empty explicit declarations (Empty deployment template)", - args: args{ - d: deploymentEmptyTemplate, - client: api.NewMock(mock.New200Response(emptyTpl())), - }, - err: multierror.NewPrefixed("invalid configuration", - errors.New("kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana"), - errors.New("apm specified but deployment template is not configured for it. Use a different template if you wish to add apm"), - errors.New("enterprise_search specified but deployment template is not configured for it. Use a different template if you wish to add enterprise_search"), - ), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := createResourceToModel(tt.args.d, tt.args.client) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_updateResourceToModel(t *testing.T) { - deploymentRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - var ioOptimizedTpl = func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-io-optimized-v2.json") - } - deploymentEmptyRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeploymentEmptyRD(), - Schema: newSchema(), - }) - deploymentOverrideRd := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleDeploymentOverrides(), - Schema: newSchema(), - }) - - hotWarmTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-hot-warm-v2.json") - } - deploymentHotWarm := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": 
"aws-hot-warm-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - }) - - ccsTpl := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/template-aws-cross-cluster-search-v2.json") - } - ccsDeploymentUpdate := func() io.ReadCloser { - return fileAsResponseBody(t, "testdata/deployment-update-aws-cross-cluster-search-v2.json") - } - deploymentEmptyRDWithTemplateChange := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - Schema: newSchema(), - }) - - deploymentEmptyRDWithTemplateChangeWithDiffSize := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - }}, - }, - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "coordinating", - "size": "16g", - }}, - }, - }, - "kibana": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - }}, - }}, - "apm": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "1g", - }}, - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - 
"deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - }, - Schema: newSchema(), - }) - - deploymentChangeFromExplicitSizingToEmpty := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - }}, - }, - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "coordinating", - "size": "16g", - }}, - }, - }, - "kibana": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - }}, - }}, - "apm": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "1g", - }}, - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "size": "8g", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - "apm": []interface{}{map[string]interface{}{}}, - "enterprise_search": []interface{}{map[string]interface{}{}}, - }, - Schema: newSchema(), - }) - - deploymentWithTags := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "version": 
"7.10.1", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }, - }, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "version": "7.10.1", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }, - }, - "tags": map[string]interface{}{ - "aaa": "bbb", - "owner": "elastic", - "cost-center": "rnd", - }, - }, - Schema: newSchema(), - }) - - type args struct { - d *schema.ResourceData - client *api.API - } - tests := []struct { - name string - args args - want *models.DeploymentUpdateRequest - err error - }{ - { - name: "parses the resources", - args: args{ - d: deploymentRD, - client: api.NewMock( - mock.New200Response(ioOptimizedTpl()), - mock.New200Response( - mock.NewStructBody(models.DeploymentGetResponse{ - Healthy: ec.Bool(true), - ID: ec.String(mock.ValidClusterID), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - ID: ec.String(mock.ValidClusterID), - RefID: ec.String("main-elasticsearch"), - }}, - }, - }), - ), - ), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{ - Observability: &models.DeploymentObservabilitySettings{ - Logging: &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: 
&models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: 
&models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations", - args: args{ - d: deploymentEmptyRD, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: 
ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: 
ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with topology overrides", - args: args{ - d: deploymentOverrideRd, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - 
ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }}, - }, - }, - }, - EnterpriseSearch: []*models.EnterpriseSearchPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: 
ec.Int32(4096), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "parses the resources with empty declarations (Hot Warm)", - args: args{ - d: deploymentHotWarm, - client: api.NewMock(mock.New200Response(hotWarmTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, hotWarmTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - Curation: nil, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - Curation: nil, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-hot-warm-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - ZoneCount: 2, 
- InstanceConfigurationID: "aws.data.highstorage.d2", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(false), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "warm"}, - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "toplogy change from hot / warm to cross cluster search", - args: args{ - d: deploymentEmptyRDWithTemplateChange, - client: api.NewMock( - mock.New200Response(ccsTpl()), - mock.New200Response(ccsDeploymentUpdate()), - ), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - Alias: "my-deployment", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{ - Observability: &models.DeploymentObservabilitySettings{}, - }, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerDeploymentUpdateToESPayload(t, ccsDeploymentUpdate(), false, "aws-cross-cluster-search-v2"), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: 
ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{}, - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-cross-cluster-search-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{}, - ZoneCount: 1, - InstanceConfigurationID: "aws.ccs.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }}, - }, - }, - }, - // The behavior of this change should be: - // * Keeps the kibana toplogy size to 2g even though the topology element has been removed (saved value persists). 
- // * Removes all other non present resources - { - name: "topology change with sizes not default from io optimized to cross cluster search", - args: args{ - d: deploymentEmptyRDWithTemplateChangeWithDiffSize, - client: api.NewMock( - mock.New200Response(ccsTpl()), - mock.New200Response(ccsDeploymentUpdate()), - ), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerDeploymentUpdateToESPayload(t, ccsDeploymentUpdate(), false, "aws-cross-cluster-search-v2"), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{}, - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-cross-cluster-search-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{}, - ZoneCount: 1, - InstanceConfigurationID: "aws.ccs.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }}, - }, - }), - Kibana: []*models.KibanaPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: 
[]*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }, - }, - }, - }}, - }, - }, - }, - // The behavior of this change should be: - // * Keeps all topology sizes as they were defined (saved value persists). - { - name: "topology change with sizes not default from explicit value to empty", - args: args{ - d: deploymentChangeFromExplicitSizingToEmpty, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - 
Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - Kibana: []*models.KibanaPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }, - }, - }, - }}, - Apm: []*models.ApmPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{}, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }}, - }, - }}, - EnterpriseSearch: []*models.EnterpriseSearchPayload{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{}, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 2, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }}, - }, - }, - }, - { - name: "does not migrate node_type to node_role on version upgrade that's lower than 7.10.0", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - 
"name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.9.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.11.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.11.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - 
InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "does not migrate node_type to node_role on version upgrade that's higher than 7.10.0", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.11.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - 
Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), false), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.11.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "migrates node_type to node_role when the existing topology element size is updated", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": 
"false", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "32g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(32768), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), 
- Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "migrates node_type to node_role when the existing topology element size is updated and adds warm tier", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - }, - map[string]interface{}{ - "id": "warm", - "size": "8g", - }, - }, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - 
}, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "warm"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "enables autoscaling with the default policies", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.1", - 
"elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "true", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "16g", - }, - map[string]interface{}{ - "id": "warm", - "size": "8g", - }, - }, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(true), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - 
TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "warm", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "warm"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "updates topologies configuration", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.1", - "elasticsearch": []interface{}{ - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: true", - }}, - }}, - }, - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "master", - "size": "1g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: true", - }}, - }}, - }, - map[string]interface{}{ - "topology": []interface{}{map[string]interface{}{ - "id": "warm", - "size": "8g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: true", - }}, - }}, - }, - }, - }, - Change: 
map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.12.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "16g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: false", - }}, - }, - map[string]interface{}{ - "id": "master", - "size": "1g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: false", - }}, - }, - map[string]interface{}{ - "id": "warm", - "size": "8g", - "zone_count": 3, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: false", - }}, - }, - }, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.12.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{ - { - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - UserSettingsYaml: "setting: false", - }, - ZoneCount: 3, - 
InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(16384), - }, - NodeRoles: []string{ - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - { - ID: "master", - Elasticsearch: &models.ElasticsearchConfiguration{ - UserSettingsYaml: "setting: false", - }, - ZoneCount: 3, - InstanceConfigurationID: "aws.master.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - NodeRoles: []string{ - "master", - "remote_cluster_client", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - }, - { - ID: "warm", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "warm"}, - UserSettingsYaml: "setting: false", - }, - ZoneCount: 3, - InstanceConfigurationID: "aws.data.highstorage.d3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "data_warm", - "remote_cluster_client", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(0), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }, - }, - }, - }), - }, - }, - }, - { - name: "parses the resources with tags", - args: args{ - d: deploymentWithTags, - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: 
&models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{Tags: []*models.MetadataItem{ - {Key: ec.String("aaa"), Value: ec.String("bbb")}, - {Key: ec.String("cost-center"), Value: ec.String("rnd")}, - {Key: ec.String("owner"), Value: ec.String("elastic")}, - }}, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "handles a snapshot_source block adding Strategy: partial", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": 
[]interface{}{map[string]interface{}{ - "version": "7.10.1", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }}, - }, - Change: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "version": "7.10.1", - "snapshot_source": []interface{}{map[string]interface{}{ - "source_elasticsearch_cluster_id": "8c63b87af9e24ea49b8a4bfe550e5fe9", - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{ - RestoreSnapshot: &models.RestoreSnapshotConfiguration{ - SourceClusterID: "8c63b87af9e24ea49b8a4bfe550e5fe9", - SnapshotName: ec.String("__latest_success__"), - Strategy: "partial", - }, - }, - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: 
map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "handles empty Elasticsearch empty config block", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "version": "7.10.1", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - "config": []interface{}{}, - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: 
&models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - { - name: "handles Elasticsearch with topology.config block", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.10.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "version": "7.10.1", - "config": []interface{}{}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "8g", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "setting: true", - }}, - }}, - }}, - }, - Schema: newSchema(), - }), - client: api.NewMock(mock.New200Response(ioOptimizedTpl())), - }, - want: &models.DeploymentUpdateRequest{ - Name: "my_deployment_name", - PruneOrphans: ec.Bool(true), - Settings: &models.DeploymentUpdateSettings{}, - Metadata: &models.DeploymentUpdateMetadata{ - Tags: []*models.MetadataItem{}, - }, - Resources: &models.DeploymentUpdateResources{ - Elasticsearch: enrichWithEmptyTopologies(readerToESPayload(t, ioOptimizedTpl(), true), &models.ElasticsearchPayload{ - Region: 
ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Settings: &models.ElasticsearchClusterSettings{ - DedicatedMastersThreshold: 6, - }, - Plan: &models.ElasticsearchClusterPlan{ - AutoscalingEnabled: ec.Bool(false), - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.10.1", - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - UserSettingsYaml: "setting: true", - }, - ZoneCount: 2, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(8192), - }, - NodeRoles: []string{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - AutoscalingMax: &models.TopologySize{ - Value: ec.Int32(118784), - Resource: ec.String("memory"), - }, - }}, - }, - }), - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := updateResourceToModel(tt.args.d, tt.args.client) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_ensurePartialSnapshotStrategy(t *testing.T) { - type args struct { - ess []*models.ElasticsearchPayload - } - tests := []struct { - name string - args args - want []*models.ElasticsearchPayload - }{ - { - name: "ignores resources with no transient block", - args: args{ess: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{}, - }}}, - want: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{}, - }}, - }, - { - name: "ignores 
resources with no transient.snapshot block", - args: args{ess: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{}, - }, - }}}, - want: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{}, - }, - }}, - }, - { - name: "Sets strategy to partial", - args: args{ess: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{ - RestoreSnapshot: &models.RestoreSnapshotConfiguration{ - SourceClusterID: "some", - }, - }, - }, - }}}, - want: []*models.ElasticsearchPayload{{ - Plan: &models.ElasticsearchClusterPlan{ - Transient: &models.TransientElasticsearchPlanConfiguration{ - RestoreSnapshot: &models.RestoreSnapshotConfiguration{ - SourceClusterID: "some", - Strategy: "partial", - }, - }, - }, - }}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ensurePartialSnapshotStrategy(tt.args.ess) - assert.Equal(t, tt.want, tt.args.ess) - }) - } -} diff --git a/ec/ecresource/deploymentresource/flatteners.go b/ec/ecresource/deploymentresource/flatteners.go deleted file mode 100644 index 86c6cbaaf..000000000 --- a/ec/ecresource/deploymentresource/flatteners.go +++ /dev/null @@ -1,320 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "errors" - "fmt" - "strings" - - semver "github.com/blang/semver/v4" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func modelToState(d *schema.ResourceData, res *models.DeploymentGetResponse, remotes models.RemoteResources) error { - if err := d.Set("name", res.Name); err != nil { - return err - } - - if err := d.Set("alias", res.Alias); err != nil { - return err - } - - if res.Metadata != nil { - if err := d.Set("tags", flattenTags(res.Metadata.Tags)); err != nil { - return err - } - } - - if res.Resources != nil { - dt, err := getDeploymentTemplateID(res.Resources) - if err != nil { - return err - } - - if err := d.Set("deployment_template_id", dt); err != nil { - return err - } - - if err := d.Set("region", getRegion(res.Resources)); err != nil { - return err - } - - // We're reconciling the version and storing the lowest version of any - // of the deployment resources. This ensures that if an upgrade fails, - // the state version will be lower than the desired version, making - // retries possible. Once more resource types are added, the function - // needs to be modified to check those as well. - version, err := getLowestVersion(res.Resources) - if err != nil { - // This code path is highly unlikely, but we're bubbling up the - // error in case one of the versions isn't parseable by semver. 
- return fmt.Errorf("failed reading deployment: %w", err) - } - if err := d.Set("version", version); err != nil { - return err - } - - esFlattened, err := flattenEsResources(res.Resources.Elasticsearch, *res.Name, remotes) - if err != nil { - return err - } - if err := d.Set("elasticsearch", esFlattened); err != nil { - return err - } - - kibanaFlattened := flattenKibanaResources(res.Resources.Kibana, *res.Name) - if len(kibanaFlattened) > 0 { - if err := d.Set("kibana", kibanaFlattened); err != nil { - return err - } - } - - apmFlattened := flattenApmResources(res.Resources.Apm, *res.Name) - if len(apmFlattened) > 0 { - if err := d.Set("apm", apmFlattened); err != nil { - return err - } - } - - integrationsServerFlattened := flattenIntegrationsServerResources(res.Resources.IntegrationsServer, *res.Name) - if len(integrationsServerFlattened) > 0 { - if err := d.Set("integrations_server", integrationsServerFlattened); err != nil { - return err - } - } - - enterpriseSearchFlattened := flattenEssResources(res.Resources.EnterpriseSearch, *res.Name) - if len(enterpriseSearchFlattened) > 0 { - if err := d.Set("enterprise_search", enterpriseSearchFlattened); err != nil { - return err - } - } - - if settings := flattenTrafficFiltering(res.Settings); settings != nil { - if err := d.Set("traffic_filter", settings); err != nil { - return err - } - } - - if observability := flattenObservability(res.Settings); len(observability) > 0 { - if err := d.Set("observability", observability); err != nil { - return err - } - } - } - - return nil -} - -func getDeploymentTemplateID(res *models.DeploymentResources) (string, error) { - var deploymentTemplateID string - var foundTemplates []string - for _, esRes := range res.Elasticsearch { - if util.IsCurrentEsPlanEmpty(esRes) { - continue - } - - var emptyDT = esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate == nil - if emptyDT { - continue - } - - if deploymentTemplateID == "" { - deploymentTemplateID = 
*esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID - } - - foundTemplates = append(foundTemplates, - *esRes.Info.PlanInfo.Current.Plan.DeploymentTemplate.ID, - ) - } - - if deploymentTemplateID == "" { - return "", errors.New("failed to obtain the deployment template id") - } - - if len(foundTemplates) > 1 { - return "", fmt.Errorf( - "there are more than 1 deployment templates specified on the deployment: \"%s\"", strings.Join(foundTemplates, ", "), - ) - } - - return deploymentTemplateID, nil -} - -// parseCredentials parses the Create or Update response Resources populating -// credential settings in the Terraform state if the keys are found, currently -// populates the following credentials in plain text: -// * Elasticsearch username and Password -func parseCredentials(d *schema.ResourceData, resources []*models.DeploymentResource) error { - var merr = multierror.NewPrefixed("failed parsing credentials") - for _, res := range resources { - // Parse ES credentials - if creds := res.Credentials; creds != nil { - if creds.Username != nil && *creds.Username != "" { - if err := d.Set("elasticsearch_username", *creds.Username); err != nil { - merr = merr.Append(err) - } - } - - if creds.Password != nil && *creds.Password != "" { - if err := d.Set("elasticsearch_password", *creds.Password); err != nil { - merr = merr.Append(err) - } - } - } - - // Parse APM secret_token - if res.SecretToken != "" { - if err := d.Set("apm_secret_token", res.SecretToken); err != nil { - merr = merr.Append(err) - } - } - } - - return merr.ErrorOrNil() -} - -func getRegion(res *models.DeploymentResources) (region string) { - for _, r := range res.Elasticsearch { - if r.Region != nil && *r.Region != "" { - return *r.Region - } - } - - return region -} - -func getLowestVersion(res *models.DeploymentResources) (string, error) { - // We're starting off with a very high version so it can be replaced. 
- replaceVersion := `99.99.99` - version := semver.MustParse(replaceVersion) - for _, r := range res.Elasticsearch { - if !util.IsCurrentEsPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.Elasticsearch.Version - if err := swapLowerVersion(&version, v); err != nil && !isEsResourceStopped(r) { - return "", fmt.Errorf("elasticsearch version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range res.Kibana { - if !util.IsCurrentKibanaPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.Kibana.Version - if err := swapLowerVersion(&version, v); err != nil && !isKibanaResourceStopped(r) { - return version.String(), fmt.Errorf("kibana version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range res.Apm { - if !util.IsCurrentApmPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.Apm.Version - if err := swapLowerVersion(&version, v); err != nil && !isApmResourceStopped(r) { - return version.String(), fmt.Errorf("apm version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range res.IntegrationsServer { - if !util.IsCurrentIntegrationsServerPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.IntegrationsServer.Version - if err := swapLowerVersion(&version, v); err != nil && !isIntegrationsServerResourceStopped(r) { - return version.String(), fmt.Errorf("integrations_server version '%s' is not semver compliant: %w", v, err) - } - } - } - - for _, r := range res.EnterpriseSearch { - if !util.IsCurrentEssPlanEmpty(r) { - v := r.Info.PlanInfo.Current.Plan.EnterpriseSearch.Version - if err := swapLowerVersion(&version, v); err != nil && !isEssResourceStopped(r) { - return version.String(), fmt.Errorf("enterprise search version '%s' is not semver compliant: %w", v, err) - } - } - } - - if version.String() != replaceVersion { - return version.String(), nil - } - return "", errors.New("Unable to determine the lowest version for any the deployment components") -} - -func swapLowerVersion(version *semver.Version, comp 
string) error { - if comp == "" { - return nil - } - - v, err := semver.Parse(comp) - if err != nil { - return err - } - if v.LT(*version) { - *version = v - } - return nil -} - -func hasRunningResources(res *models.DeploymentGetResponse) bool { - var hasRunning bool - if res.Resources != nil { - for _, r := range res.Resources.Elasticsearch { - if !isEsResourceStopped(r) { - hasRunning = true - } - } - for _, r := range res.Resources.Kibana { - if !isKibanaResourceStopped(r) { - hasRunning = true - } - } - for _, r := range res.Resources.Apm { - if !isApmResourceStopped(r) { - hasRunning = true - } - } - for _, r := range res.Resources.EnterpriseSearch { - if !isEssResourceStopped(r) { - hasRunning = true - } - } - for _, r := range res.Resources.IntegrationsServer { - if !isIntegrationsServerResourceStopped(r) { - hasRunning = true - } - } - } - return hasRunning -} - -func flattenTags(tags []*models.MetadataItem) map[string]interface{} { - if len(tags) == 0 { - return nil - } - - result := make(map[string]interface{}, len(tags)) - for _, tag := range tags { - result[*tag.Key] = *tag.Value - } - - return result -} diff --git a/ec/ecresource/deploymentresource/flatteners_test.go b/ec/ecresource/deploymentresource/flatteners_test.go deleted file mode 100644 index 8b4400fb8..000000000 --- a/ec/ecresource/deploymentresource/flatteners_test.go +++ /dev/null @@ -1,1786 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "errors" - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_modelToState(t *testing.T) { - deploymentSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - deploymentSchemaArg.SetId(mock.ValidClusterID) - - deploymentLowerVersionSchemaArg := schema.TestResourceDataRaw(t, newSchema(), nil) - deploymentLowerVersionSchemaArg.SetId(mock.ValidClusterID) - - wantDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - azureIOOptimizedRes := openDeploymentGet(t, "testdata/deployment-azure-io-optimized.json") - azureIOOptimizedRD := schema.TestResourceDataRaw(t, newSchema(), nil) - azureIOOptimizedRD.SetId(mock.ValidClusterID) - wantAzureIOOptimizedDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "my-deployment", - "deployment_template_id": "azure-io-optimized", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "azure-eastus2", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "azure-eastus2", - "resource_id": 
"1235d8c911b74dd6a03c2a7b37fd68ab", - "version": "7.9.2", - "http_endpoint": "http://1235d8c911b74dd6a03c2a7b37fd68ab.apm.eastus2.azure.elastic-cloud.com:9200", - "https_endpoint": "https://1235d8c911b74dd6a03c2a7b37fd68ab.apm.eastus2.azure.elastic-cloud.com:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "azure.apm.e32sv3", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d:somecloudID", - "http_endpoint": "http://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9200", - "https_endpoint": "https://1238f19957874af69306787dca662154.eastus2.azure.elastic-cloud.com:9243", - "ref_id": "main-elasticsearch", - "region": "azure-eastus2", - "resource_id": "1238f19957874af69306787dca662154", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "azure.data.highio.l32sv2", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "4g", - "size_resource": "memory", - "zone_count": 2, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "azure-eastus2", - "resource_id": "1235cd4a4c7f464bbcfd795f3638b769", - "version": "7.9.2", - "http_endpoint": "http://1235cd4a4c7f464bbcfd795f3638b769.eastus2.azure.elastic-cloud.com:9200", - "https_endpoint": "https://1235cd4a4c7f464bbcfd795f3638b769.eastus2.azure.elastic-cloud.com:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "azure.kibana.e32sv3", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - - awsIOOptimizedRes := openDeploymentGet(t, "testdata/deployment-aws-io-optimized.json") - awsIOOptimizedRD := schema.TestResourceDataRaw(t, 
newSchema(), nil) - awsIOOptimizedRD.SetId(mock.ValidClusterID) - wantAwsIOOptimizedDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "my-deployment", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "aws-eu-central-1", - "resource_id": "12328579b3bf40c8b58c1a0ed5a4bd8b", - "version": "7.9.2", - "http_endpoint": "http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80", - "https_endpoint": "https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d:someCloudID", - "http_endpoint": "http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243", - "ref_id": "main-elasticsearch", - "region": "aws-eu-central-1", - "resource_id": "1239f7ee7196439ba2d105319ac5eba7", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "8g", - "size_resource": "memory", - "zone_count": 2, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "aws-eu-central-1", - "resource_id": "123dcfda06254ca789eb287e8b73ff4c", - "version": "7.9.2", - 
"http_endpoint": "http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - - awsIOOptimizedExtensionRD := schema.TestResourceDataRaw(t, newSchema(), nil) - awsIOOptimizedExtensionRD.SetId(mock.ValidClusterID) - - awsIOOptimizedTagsRes := openDeploymentGet(t, "testdata/deployment-aws-io-optimized-tags.json") - awsIOOptimizedTagsRD := schema.TestResourceDataRaw(t, newSchema(), nil) - awsIOOptimizedTagsRD.SetId(mock.ValidClusterID) - wantAwsIOOptimizedDeploymentTags := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "my-deployment", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "tags": map[string]interface{}{ - "aaa": "bbb", - "cost": "rnd", - "owner": "elastic", - }, - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "aws-eu-central-1", - "resource_id": "12328579b3bf40c8b58c1a0ed5a4bd8b", - "version": "7.9.2", - "http_endpoint": "http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80", - "https_endpoint": "https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d:someCloudID", - "http_endpoint": 
"http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243", - "ref_id": "main-elasticsearch", - "region": "aws-eu-central-1", - "resource_id": "1239f7ee7196439ba2d105319ac5eba7", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "8g", - "size_resource": "memory", - "zone_count": 2, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "aws-eu-central-1", - "resource_id": "123dcfda06254ca789eb287e8b73ff4c", - "version": "7.9.2", - "http_endpoint": "http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - - gcpIOOptimizedRes := openDeploymentGet(t, "testdata/deployment-gcp-io-optimized.json") - gcpIOOptimizedRD := schema.TestResourceDataRaw(t, newSchema(), nil) - gcpIOOptimizedRD.SetId(mock.ValidClusterID) - wantGcpIOOptimizedDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "my-deployment", - "deployment_template_id": "gcp-io-optimized", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "gcp-asia-east1", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "gcp-asia-east1", - "resource_id": 
"12307c6c304949b8a9f3682b80900879", - "version": "7.9.2", - "http_endpoint": "http://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:80", - "https_endpoint": "https://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.apm.1", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d:someCloudID", - "http_endpoint": "http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200", - "https_endpoint": "https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243", - "ref_id": "main-elasticsearch", - "region": "gcp-asia-east1", - "resource_id": "123695e76d914005bf90b717e668ad4b", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "gcp.data.highio.1", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "8g", - "size_resource": "memory", - "zone_count": 2, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "gcp-asia-east1", - "resource_id": "12365046781e4d729a07df64fe67c8c6", - "version": "7.9.2", - "http_endpoint": "http://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9200", - "https_endpoint": "https://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.kibana.1", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - - gcpIOOptimizedAutoscaleRes := openDeploymentGet(t, "testdata/deployment-gcp-io-optimized-autoscale.json") - gcpIOOptimizedAutoscaleRD := 
schema.TestResourceDataRaw(t, newSchema(), nil) - gcpIOOptimizedAutoscaleRD.SetId(mock.ValidClusterID) - - gcpHotWarmRes := openDeploymentGet(t, "testdata/deployment-gcp-hot-warm.json") - gcpHotWarmRD := schema.TestResourceDataRaw(t, newSchema(), nil) - gcpHotWarmRD.SetId(mock.ValidClusterID) - wantGcpHotWarmDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "deployment_template_id": "gcp-hot-warm", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d-hot-warm", - "region": "gcp-us-central1", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "gcp-us-central1", - "resource_id": "1234b68b0b9347f1b49b1e01b33bf4a4", - "version": "7.9.2", - "http_endpoint": "http://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:80", - "https_endpoint": "https://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.apm.1", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d-hot-warm:someCloudID", - "http_endpoint": "http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200", - "https_endpoint": "https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243", - "ref_id": "main-elasticsearch", - "region": "gcp-us-central1", - "resource_id": "123e837db6ee4391bb74887be35a7a91", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "gcp.data.highio.1", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "4g", - "size_resource": "memory", - "zone_count": 2, - }, - map[string]interface{}{ - "id": "warm", - 
"instance_configuration_id": "gcp.data.highstorage.1", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "false", - "node_type_ml": "false", - "size": "4g", - "size_resource": "memory", - "zone_count": 2, - }, - }, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "gcp-us-central1", - "resource_id": "12372cc60d284e7e96b95ad14727c23d", - "version": "7.9.2", - "http_endpoint": "http://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9200", - "https_endpoint": "https://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.kibana.1", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - _ = wantGcpHotWarmDeployment.Set("alias", "") - - wantGcpIOOptAutoscale := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "", - "deployment_template_id": "gcp-io-optimized", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "gcp-asia-east1", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "gcp-asia-east1", - "resource_id": "12307c6c304949b8a9f3682b80900879", - "version": "7.9.2", - "http_endpoint": "http://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:80", - "https_endpoint": "https://12307c6c304949b8a9f3682b80900879.apm.asia-east1.gcp.elastic-cloud.com:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.apm.1", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "true", - "cloud_id": "up2d:someCloudID", - "http_endpoint": 
"http://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9200", - "https_endpoint": "https://123695e76d914005bf90b717e668ad4b.asia-east1.gcp.elastic-cloud.com:9243", - "ref_id": "main-elasticsearch", - "region": "gcp-asia-east1", - "resource_id": "123695e76d914005bf90b717e668ad4b", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "gcp.data.highio.1", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "8g", - "size_resource": "memory", - "zone_count": 2, - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "29g", - "max_size_resource": "memory", - "policy_override_json": `{"proactive_storage":{"forecast_window":"3 h"}}`, - }}, - }, - map[string]interface{}{ - "id": "ml", - "instance_configuration_id": "gcp.ml.1", - "node_type_data": "false", - "node_type_ingest": "false", - "node_type_master": "false", - "node_type_ml": "true", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - "autoscaling": []interface{}{map[string]interface{}{ - "max_size": "30g", - "max_size_resource": "memory", - - "min_size": "1g", - "min_size_resource": "memory", - }}, - }, - }, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "gcp-asia-east1", - "resource_id": "12365046781e4d729a07df64fe67c8c6", - "version": "7.9.2", - "http_endpoint": "http://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9200", - "https_endpoint": "https://12365046781e4d729a07df64fe67c8c6.asia-east1.gcp.elastic-cloud.com:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.kibana.1", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - _ = wantGcpIOOptAutoscale.Set("alias", "") - - gcpHotWarmNodeRolesRes := openDeploymentGet(t, 
"testdata/deployment-gcp-hot-warm-node_roles.json") - gcpHotWarmNodeRolesRD := schema.TestResourceDataRaw(t, newSchema(), nil) - gcpHotWarmNodeRolesRD.SetId(mock.ValidClusterID) - wantGcpHotWarmNodeRolesDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "deployment_template_id": "gcp-hot-warm", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d-hot-warm", - "region": "gcp-us-central1", - "version": "7.11.0", - "apm": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "gcp-us-central1", - "resource_id": "1234b68b0b9347f1b49b1e01b33bf4a4", - "version": "7.11.0", - "http_endpoint": "http://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:80", - "https_endpoint": "https://1234b68b0b9347f1b49b1e01b33bf4a4.apm.us-central1.gcp.cloud.es.io:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.apm.1", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d-hot-warm:someCloudID", - "http_endpoint": "http://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9200", - "https_endpoint": "https://123e837db6ee4391bb74887be35a7a91.us-central1.gcp.cloud.es.io:9243", - "ref_id": "main-elasticsearch", - "region": "gcp-us-central1", - "resource_id": "123e837db6ee4391bb74887be35a7a91", - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "gcp.data.highio.1", - "size": "4g", - "size_resource": "memory", - "zone_count": 2, - "node_roles": []interface{}{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - }, - map[string]interface{}{ - "id": "warm", - "instance_configuration_id": "gcp.data.highstorage.1", - "size": "4g", - "size_resource": 
"memory", - "zone_count": 2, - "node_roles": []interface{}{ - "data_warm", - "remote_cluster_client", - }, - }, - }, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "gcp-us-central1", - "resource_id": "12372cc60d284e7e96b95ad14727c23d", - "version": "7.11.0", - "http_endpoint": "http://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9200", - "https_endpoint": "https://12372cc60d284e7e96b95ad14727c23d.us-central1.gcp.cloud.es.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.kibana.1", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - _ = wantGcpHotWarmNodeRolesDeployment.Set("alias", "") - - awsCCSRes := openDeploymentGet(t, "testdata/deployment-aws-ccs.json") - awsCCSRD := schema.TestResourceDataRaw(t, newSchema(), nil) - awsCCSRD.SetId(mock.ValidClusterID) - wantAWSCCSDeployment := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "deployment_template_id": "aws-cross-cluster-search-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "ccs", - "region": "eu-west-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "ccs:someCloudID", - "http_endpoint": "http://1230b3ae633b4f51a432d50971f7f1c1.eu-west-1.aws.found.io:9200", - "https_endpoint": "https://1230b3ae633b4f51a432d50971f7f1c1.eu-west-1.aws.found.io:9243", - "ref_id": "main-elasticsearch", - "region": "eu-west-1", - "resource_id": "1230b3ae633b4f51a432d50971f7f1c1", - "remote_cluster": []interface{}{ - map[string]interface{}{ - "alias": "alias", - "deployment_id": "someid", - "ref_id": "main-elasticsearch", - "skip_unavailable": true, - }, - map[string]interface{}{ - "deployment_id": "some other id", - "ref_id": "main-elasticsearch", - }, - }, - "topology": 
[]interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.ccs.r5d", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": "eu-west-1", - "resource_id": "12317425e9e14491b74ee043db3402eb", - "version": "7.9.2", - "http_endpoint": "http://12317425e9e14491b74ee043db3402eb.eu-west-1.aws.found.io:9200", - "https_endpoint": "https://12317425e9e14491b74ee043db3402eb.eu-west-1.aws.found.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }) - _ = wantAWSCCSDeployment.Set("alias", "") - argCCSRemotes := models.RemoteResources{Resources: []*models.RemoteResourceRef{ - { - Alias: ec.String("alias"), - DeploymentID: ec.String("someid"), - ElasticsearchRefID: ec.String("main-elasticsearch"), - SkipUnavailable: ec.Bool(true), - }, - { - DeploymentID: ec.String("some other id"), - ElasticsearchRefID: ec.String("main-elasticsearch"), - }, - }} - - type args struct { - d *schema.ResourceData - res *models.DeploymentGetResponse - remotes models.RemoteResources - } - tests := []struct { - name string - args args - want *schema.ResourceData - err error - }{ - { - name: "flattens deployment resources", - want: wantDeployment, - args: args{ - d: deploymentSchemaArg, - res: &models.DeploymentGetResponse{ - Alias: "my-deployment", - Name: ec.String("my_deployment_name"), - Settings: &models.DeploymentSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - Observability: &models.DeploymentObservabilitySettings{ - Logging: 
&models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }, - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("started"), - ClusterID: &mock.ValidClusterID, - ClusterName: ec.String("some-name"), - Region: "us-east-1", - ElasticsearchMonitoringInfo: &models.ElasticsearchMonitoringInfo{ - DestinationClusterIds: []string{"some"}, - }, - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - 
Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }}, - }, - }, - }, - }, - }, - }, - Kibana: []*models.KibanaResourceInfo{ - { - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Info: &models.KibanaClusterInfo{ - Status: ec.String("started"), - ClusterID: &mock.ValidClusterID, - ClusterName: ec.String("some-kibana-name"), - Region: "us-east-1", - PlanInfo: &models.KibanaClusterPlansInfo{ - Current: &models.KibanaClusterPlanInfo{ - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - }, - }, - Apm: []*models.ApmResourceInfo{{ - Region: ec.String("us-east-1"), - RefID: ec.String("main-apm"), - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Info: &models.ApmInfo{ - Status: ec.String("started"), - ID: &mock.ValidClusterID, - Name: ec.String("some-apm-name"), - Region: "us-east-1", - PlanInfo: &models.ApmPlansInfo{ - Current: &models.ApmPlanInfo{ - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - Version: "7.7.0", - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - }}, - }, - }, - }, - }, - }}, - EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ - { - Region: ec.String("us-east-1"), - RefID: ec.String("main-enterprise_search"), - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Info: &models.EnterpriseSearchInfo{ - Status: ec.String("started"), - ID: &mock.ValidClusterID, - Name: 
ec.String("some-enterprise_search-name"), - Region: "us-east-1", - PlanInfo: &models.EnterpriseSearchPlansInfo{ - Current: &models.EnterpriseSearchPlanInfo{ - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - { - name: "sets the global version to the lesser version", - args: args{ - d: deploymentLowerVersionSchemaArg, - res: &models.DeploymentGetResponse{ - Alias: "my-deployment", - Name: ec.String("my_deployment_name"), - Settings: &models.DeploymentSettings{ - TrafficFilterSettings: &models.TrafficFilterSettings{ - Rulesets: []string{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }, - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Region: ec.String("us-east-1"), - RefID: ec.String("main-elasticsearch"), - Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("started"), - ClusterID: &mock.ValidClusterID, - ClusterName: ec.String("some-name"), - Region: "us-east-1", - ElasticsearchMonitoringInfo: &models.ElasticsearchMonitoringInfo{ - DestinationClusterIds: []string{"some"}, - }, - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.7.0", - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": 
"value2", - }, - }, - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Elasticsearch: &models.ElasticsearchConfiguration{ - NodeAttributes: map[string]string{"data": "hot"}, - }, - ZoneCount: 1, - InstanceConfigurationID: "aws.data.highio.i3", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.ElasticsearchNodeType{ - Data: ec.Bool(true), - Ingest: ec.Bool(true), - Master: ec.Bool(true), - Ml: ec.Bool(false), - }, - TopologyElementControl: &models.TopologyElementControl{ - Min: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }}, - }, - }, - }, - }, - }, - }, - Kibana: []*models.KibanaResourceInfo{ - { - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Info: &models.KibanaClusterInfo{ - Status: ec.String("started"), - ClusterID: &mock.ValidClusterID, - ClusterName: ec.String("some-kibana-name"), - Region: "us-east-1", - PlanInfo: &models.KibanaClusterPlansInfo{ - Current: &models.KibanaClusterPlanInfo{ - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - Version: "7.6.2", - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - want: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.6.2", - "elasticsearch": []interface{}{map[string]interface{}{ - "ref_id": 
"main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "us-east-1", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "size": "2g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "zone_count": 1, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "us-east-1", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "zone_count": 1, - }, - }, - }}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - }, - }), - }, - { - name: "flattens an azure plan (io-optimized)", - args: args{d: azureIOOptimizedRD, res: azureIOOptimizedRes}, - want: wantAzureIOOptimizedDeployment, - }, - { - name: "flattens an aws plan (io-optimized)", - args: args{d: awsIOOptimizedRD, res: awsIOOptimizedRes}, - want: wantAwsIOOptimizedDeployment, - }, - { - name: "flattens an aws plan with extensions (io-optimized)", - args: args{ - d: awsIOOptimizedExtensionRD, - res: openDeploymentGet(t, "testdata/deployment-aws-io-optimized-extension.json"), - }, - want: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: map[string]interface{}{ - "alias": "my-deployment", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "version": "7.9.2", - "apm": []interface{}{map[string]interface{}{ - 
"elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "region": "aws-eu-central-1", - "resource_id": "12328579b3bf40c8b58c1a0ed5a4bd8b", - "version": "7.9.2", - "http_endpoint": "http://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:80", - "https_endpoint": "https://12328579b3bf40c8b58c1a0ed5a4bd8b.apm.eu-central-1.aws.cloud.es.io:443", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "elasticsearch": []interface{}{map[string]interface{}{ - "autoscale": "false", - "cloud_id": "up2d:someCloudID", - "extension": []interface{}{ - map[string]interface{}{ - "name": "custom-bundle", - "version": "7.9.2", - "url": "http://12345", - "type": "bundle", - }, - map[string]interface{}{ - "name": "custom-bundle2", - "version": "7.9.2", - "url": "http://123456", - "type": "bundle", - }, - map[string]interface{}{ - "name": "custom-plugin", - "version": "7.9.2", - "url": "http://12345", - "type": "plugin", - }, - map[string]interface{}{ - "name": "custom-plugin2", - "version": "7.9.2", - "url": "http://123456", - "type": "plugin", - }, - }, - "http_endpoint": "http://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://1239f7ee7196439ba2d105319ac5eba7.eu-central-1.aws.cloud.es.io:9243", - "ref_id": "main-elasticsearch", - "region": "aws-eu-central-1", - "resource_id": "1239f7ee7196439ba2d105319ac5eba7", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "size": "8g", - "size_resource": "memory", - "zone_count": 2, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "region": 
"aws-eu-central-1", - "resource_id": "123dcfda06254ca789eb287e8b73ff4c", - "version": "7.9.2", - "http_endpoint": "http://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9200", - "https_endpoint": "https://123dcfda06254ca789eb287e8b73ff4c.eu-central-1.aws.cloud.es.io:9243", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - Schema: newSchema(), - }), - }, - { - name: "flattens an aws plan with trusts", - args: args{ - d: newDeploymentRD(t, "123b7b540dfc967a7a649c18e2fce4ed", nil), - res: &models.DeploymentGetResponse{ - ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), - Alias: "OH", - Name: ec.String("up2d"), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - RefID: ec.String("main-elasticsearch"), - Region: ec.String("aws-eu-central-1"), - Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("running"), - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.13.1", - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Size: &models.TopologySize{ - Value: ec.Int32(4096), - Resource: ec.String("memory"), - }, - }}, - }, - }, - }, - Settings: &models.ElasticsearchClusterSettings{ - Trust: &models.ElasticsearchClusterTrustSettings{ - Accounts: []*models.AccountTrustRelationship{ - { - AccountID: ec.String("ANID"), - TrustAll: ec.Bool(true), - }, - { - AccountID: ec.String("anotherID"), - TrustAll: ec.Bool(false), - TrustAllowlist: []string{ - "abc", "dfg", "hij", - }, - }, - }, - External: []*models.ExternalTrustRelationship{ - { - TrustRelationshipID: 
ec.String("external_id"), - TrustAll: ec.Bool(true), - }, - { - TrustRelationshipID: ec.String("another_external_id"), - TrustAll: ec.Bool(false), - TrustAllowlist: []string{ - "abc", "dfg", - }, - }, - }, - }, - }, - }, - }}, - }, - }, - }, - want: util.NewResourceData(t, util.ResDataParams{ - ID: "123b7b540dfc967a7a649c18e2fce4ed", - State: map[string]interface{}{ - "alias": "OH", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "version": "7.13.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "region": "aws-eu-central-1", - "ref_id": "main-elasticsearch", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "4g", - "size_resource": "memory", - }}, - "trust_account": []interface{}{ - map[string]interface{}{ - "account_id": "ANID", - "trust_all": "true", - }, - map[string]interface{}{ - "account_id": "anotherID", - "trust_all": "false", - "trust_allowlist": []interface{}{ - "abc", "hij", "dfg", - }, - }, - }, - "trust_external": []interface{}{ - map[string]interface{}{ - "relationship_id": "another_external_id", - "trust_all": "false", - "trust_allowlist": []interface{}{ - "abc", "dfg", - }, - }, - map[string]interface{}{ - "relationship_id": "external_id", - "trust_all": "true", - }, - }, - }}, - }, - Schema: newSchema(), - }), - }, - { - name: "flattens an aws plan with topology.config set", - args: args{ - d: newDeploymentRD(t, "123b7b540dfc967a7a649c18e2fce4ed", nil), - res: &models.DeploymentGetResponse{ - ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), - Alias: "OH", - Name: ec.String("up2d"), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - RefID: ec.String("main-elasticsearch"), - Region: ec.String("aws-eu-central-1"), - Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("running"), - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: 
&models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.13.1", - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Size: &models.TopologySize{ - Value: ec.Int32(4096), - Resource: ec.String("memory"), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - UserSettingsYaml: "a.setting: true", - }, - }}, - }, - }, - }, - Settings: &models.ElasticsearchClusterSettings{}, - }, - }}, - }, - }, - }, - want: util.NewResourceData(t, util.ResDataParams{ - ID: "123b7b540dfc967a7a649c18e2fce4ed", - State: map[string]interface{}{ - "alias": "OH", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "version": "7.13.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "region": "aws-eu-central-1", - "ref_id": "main-elasticsearch", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "4g", - "size_resource": "memory", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "a.setting: true", - }}, - }}, - }}, - }, - Schema: newSchema(), - }), - }, - { - name: "flattens an plan with config.docker_image set", - args: args{ - d: newDeploymentRD(t, "123b7b540dfc967a7a649c18e2fce4ed", nil), - res: &models.DeploymentGetResponse{ - ID: ec.String("123b7b540dfc967a7a649c18e2fce4ed"), - Alias: "OH", - Name: ec.String("up2d"), - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - RefID: ec.String("main-elasticsearch"), - Region: ec.String("aws-eu-central-1"), - Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("running"), - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: 
&models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized-v2"), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.14.1", - DockerImage: "docker.elastic.com/elasticsearch/cloud:7.14.1-hash", - }, - ClusterTopology: []*models.ElasticsearchClusterTopologyElement{{ - ID: "hot_content", - Size: &models.TopologySize{ - Value: ec.Int32(4096), - Resource: ec.String("memory"), - }, - Elasticsearch: &models.ElasticsearchConfiguration{ - UserSettingsYaml: "a.setting: true", - }, - ZoneCount: 1, - }}, - }, - }, - }, - Settings: &models.ElasticsearchClusterSettings{}, - }, - }}, - Apm: []*models.ApmResourceInfo{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - RefID: ec.String("main-apm"), - Region: ec.String("aws-eu-central-1"), - Info: &models.ApmInfo{ - Status: ec.String("running"), - PlanInfo: &models.ApmPlansInfo{Current: &models.ApmPlanInfo{ - Plan: &models.ApmPlan{ - Apm: &models.ApmConfiguration{ - Version: "7.14.1", - DockerImage: "docker.elastic.com/apm/cloud:7.14.1-hash", - SystemSettings: &models.ApmSystemSettings{ - DebugEnabled: ec.Bool(false), - }, - }, - ClusterTopology: []*models.ApmTopologyElement{{ - InstanceConfigurationID: "aws.apm.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(512), - }, - ZoneCount: 1, - }}, - }, - }}, - }, - }}, - Kibana: []*models.KibanaResourceInfo{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - RefID: ec.String("main-kibana"), - Region: ec.String("aws-eu-central-1"), - Info: &models.KibanaClusterInfo{ - Status: ec.String("running"), - PlanInfo: &models.KibanaClusterPlansInfo{Current: &models.KibanaClusterPlanInfo{ - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - Version: "7.14.1", - DockerImage: "docker.elastic.com/kibana/cloud:7.14.1-hash", - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{{ - InstanceConfigurationID: 
"aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - ZoneCount: 1, - }}, - }, - }}, - }, - }}, - EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{{ - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - RefID: ec.String("main-enterprise_search"), - Region: ec.String("aws-eu-central-1"), - Info: &models.EnterpriseSearchInfo{ - Status: ec.String("running"), - PlanInfo: &models.EnterpriseSearchPlansInfo{Current: &models.EnterpriseSearchPlanInfo{ - Plan: &models.EnterpriseSearchPlan{ - EnterpriseSearch: &models.EnterpriseSearchConfiguration{ - Version: "7.14.1", - DockerImage: "docker.elastic.com/enterprise_search/cloud:7.14.1-hash", - }, - ClusterTopology: []*models.EnterpriseSearchTopologyElement{{ - InstanceConfigurationID: "aws.enterprisesearch.m5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - NodeType: &models.EnterpriseSearchNodeTypes{ - Appserver: ec.Bool(true), - Connector: ec.Bool(true), - Worker: ec.Bool(true), - }, - ZoneCount: 2, - }}, - }, - }}, - }, - }}, - }, - }, - }, - want: util.NewResourceData(t, util.ResDataParams{ - ID: "123b7b540dfc967a7a649c18e2fce4ed", - State: map[string]interface{}{ - "alias": "OH", - "deployment_template_id": "aws-io-optimized-v2", - "id": "123b7b540dfc967a7a649c18e2fce4ed", - "name": "up2d", - "region": "aws-eu-central-1", - "version": "7.14.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "region": "aws-eu-central-1", - "ref_id": "main-elasticsearch", - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/elasticsearch/cloud:7.14.1-hash", - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "a.setting: true", - }}, - }}, - }}, - "kibana": []interface{}{map[string]interface{}{ - "region": 
"aws-eu-central-1", - "ref_id": "main-kibana", - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/kibana/cloud:7.14.1-hash", - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "apm": []interface{}{map[string]interface{}{ - "region": "aws-eu-central-1", - "ref_id": "main-apm", - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/apm/cloud:7.14.1-hash", - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "0.5g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - "enterprise_search": []interface{}{map[string]interface{}{ - "region": "aws-eu-central-1", - "ref_id": "main-enterprise_search", - "config": []interface{}{map[string]interface{}{ - "docker_image": "docker.elastic.com/enterprise_search/cloud:7.14.1-hash", - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - "size": "2g", - "size_resource": "memory", - "zone_count": 2, - "node_type_appserver": "true", - "node_type_connector": "true", - "node_type_worker": "true", - }}, - }}, - }, - Schema: newSchema(), - }), - }, - { - name: "flattens an aws plan (io-optimized) with tags", - args: args{d: awsIOOptimizedTagsRD, res: awsIOOptimizedTagsRes}, - want: wantAwsIOOptimizedDeploymentTags, - }, - { - name: "flattens a gcp plan (io-optimized)", - args: args{d: gcpIOOptimizedRD, res: gcpIOOptimizedRes}, - want: wantGcpIOOptimizedDeployment, - }, - { - name: "flattens a gcp plan with autoscale set (io-optimized)", - args: args{d: gcpIOOptimizedRD, res: gcpIOOptimizedAutoscaleRes}, - want: wantGcpIOOptAutoscale, - }, - { - name: "flattens a gcp plan (hot-warm)", - args: args{d: gcpHotWarmRD, res: gcpHotWarmRes}, - want: wantGcpHotWarmDeployment, - }, - { - name: "flattens a gcp plan 
(hot-warm) with node_roles", - args: args{d: gcpHotWarmNodeRolesRD, res: gcpHotWarmNodeRolesRes}, - want: wantGcpHotWarmNodeRolesDeployment, - }, - { - name: "flattens an aws plan (Cross Cluster Search)", - args: args{d: awsCCSRD, res: awsCCSRes, remotes: argCCSRemotes}, - want: wantAWSCCSDeployment, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := modelToState(tt.args.d, tt.args.res, tt.args.remotes) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - var wantState interface{} - if tt.want != nil { - wantState = tt.want.State().Attributes - } - - assert.Equal(t, wantState, tt.args.d.State().Attributes) - }) - } -} - -func Test_getDeploymentTemplateID(t *testing.T) { - type args struct { - res *models.DeploymentResources - } - tests := []struct { - name string - args args - want string - err error - }{ - { - name: "empty resources returns an error", - args: args{res: &models.DeploymentResources{}}, - err: errors.New("failed to obtain the deployment template id"), - }, - { - name: "single empty current plan returns error", - args: args{res: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Pending: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized"), - }, - }, - }, - }, - }, - }, - }, - }}, - err: errors.New("failed to obtain the deployment template id"), - }, - { - name: "multiple deployment templates returns an error", - args: args{res: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - 
DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("someid"), - }, - }, - }, - }, - }, - }, - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("someotherid"), - }, - }, - }, - }, - }, - }, - }, - }}, - err: errors.New("there are more than 1 deployment templates specified on the deployment: \"someid, someotherid\""), - }, - { - name: "single deployment template returns it", - args: args{res: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - DeploymentTemplate: &models.DeploymentTemplateReference{ - ID: ec.String("aws-io-optimized"), - }, - }, - }, - }, - }, - }, - }, - }}, - want: "aws-io-optimized", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := getDeploymentTemplateID(tt.args.res) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_parseCredentials(t *testing.T) { - deploymentRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - rawData := newSampleLegacyDeployment() - rawData["elasticsearch_username"] = "my-username" - rawData["elasticsearch_password"] = "my-password" - rawData["apm_secret_token"] = "some-secret-token" - - wantDeploymentRD := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: rawData, - Schema: newSchema(), - }) - - type args struct { - d *schema.ResourceData - resources []*models.DeploymentResource - } - tests := []struct { - 
name string - args args - want *schema.ResourceData - err error - }{ - { - name: "Parses credentials", - args: args{ - d: deploymentRD, - resources: []*models.DeploymentResource{{ - Credentials: &models.ClusterCredentials{ - Username: ec.String("my-username"), - Password: ec.String("my-password"), - }, - SecretToken: "some-secret-token", - }}, - }, - want: wantDeploymentRD, - }, - { - name: "when no credentials are passed, it doesn't overwrite them", - args: args{ - d: util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: rawData, - Schema: newSchema(), - }), - resources: []*models.DeploymentResource{ - {}, - }, - }, - want: wantDeploymentRD, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := parseCredentials(tt.args.d, tt.args.resources) - if tt.err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - assert.Equal(t, tt.want.State().Attributes, tt.args.d.State().Attributes) - }) - } -} - -func Test_hasRunningResources(t *testing.T) { - type args struct { - res *models.DeploymentGetResponse - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "has all the resources stopped", - args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - {Info: &models.ElasticsearchClusterInfo{Status: ec.String("stopped")}}, - }, - Kibana: []*models.KibanaResourceInfo{ - {Info: &models.KibanaClusterInfo{Status: ec.String("stopped")}}, - }, - Apm: []*models.ApmResourceInfo{ - {Info: &models.ApmInfo{Status: ec.String("stopped")}}, - }, - EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ - {Info: &models.EnterpriseSearchInfo{Status: ec.String("stopped")}}, - }, - }}}, - want: false, - }, - { - name: "has some resources stopped", - args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ 
- {Info: &models.ElasticsearchClusterInfo{Status: ec.String("running")}}, - }, - Kibana: []*models.KibanaResourceInfo{ - {Info: &models.KibanaClusterInfo{Status: ec.String("stopped")}}, - }, - Apm: []*models.ApmResourceInfo{ - {Info: &models.ApmInfo{Status: ec.String("running")}}, - }, - EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ - {Info: &models.EnterpriseSearchInfo{Status: ec.String("running")}}, - }, - }}}, - want: true, - }, - { - name: "has all resources running", - args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{ - {Info: &models.ElasticsearchClusterInfo{Status: ec.String("running")}}, - }, - Kibana: []*models.KibanaResourceInfo{ - {Info: &models.KibanaClusterInfo{Status: ec.String("running")}}, - }, - Apm: []*models.ApmResourceInfo{ - {Info: &models.ApmInfo{Status: ec.String("running")}}, - }, - EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ - {Info: &models.EnterpriseSearchInfo{Status: ec.String("running")}}, - }, - }}}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - if got := hasRunningResources(tt.args.res); got != tt.want { - t.Errorf("hasRunningResources() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/ec/ecresource/deploymentresource/import.go b/ec/ecresource/deploymentresource/import.go deleted file mode 100644 index f685cfa9d..000000000 --- a/ec/ecresource/deploymentresource/import.go +++ /dev/null @@ -1,75 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "context" - "errors" - "fmt" - - semver "github.com/blang/semver/v4" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deputil" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// Setting this variable here so that it is parsed at compile time in case -// any errors are thrown, they are at compile time not when the user runs it. -var ilmVersion = semver.MustParse("6.6.0") - -// imports a deployment limitting the allowed version to 6.6.0 or higher. -// TODO: It might be desired to provide the ability to import a deployment -// specifying key:value pairs of secrets to populate as part of the -// import with an implementation of schema.StateContextFunc. 
-func importFunc(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) { - client := m.(*api.API) - res, err := deploymentapi.Get(deploymentapi.GetParams{ - API: client, - DeploymentID: d.Id(), - QueryParams: deputil.QueryParams{ - ShowPlans: true, - }, - }) - if err != nil { - return nil, err - } - - if len(res.Resources.Elasticsearch) == 0 { - return nil, errors.New( - "invalid deployment: deployment has no elasticsearch resources", - ) - } - - v, err := semver.New( - res.Resources.Elasticsearch[0].Info.PlanInfo.Current.Plan.Elasticsearch.Version, - ) - if err != nil { - return nil, fmt.Errorf("unable to parse deployment version: %w", err) - } - - if v.LT(ilmVersion) { - return nil, fmt.Errorf( - `invalid deployment version "%s": minimum supported version is "%s"`, - v.String(), ilmVersion.String(), - ) - } - - return []*schema.ResourceData{d}, nil -} diff --git a/ec/ecresource/deploymentresource/import_test.go b/ec/ecresource/deploymentresource/import_test.go deleted file mode 100644 index f8e631ff0..000000000 --- a/ec/ecresource/deploymentresource/import_test.go +++ /dev/null @@ -1,240 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "context" - "errors" - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func Test_importFunc(t *testing.T) { - deploymentWithImportableVersion := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "7.9.2", - "elasticsearch": []interface{}{map[string]interface{}{}}, - }, - }) - deploymentWithNonImportableVersion := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "5.6.1", - "elasticsearch": []interface{}{map[string]interface{}{}}, - }, - }) - deploymentWithNonImportableVersionSix := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "my_deployment_name", - "deployment_template_id": "aws-cross-cluster-search-v2", - "region": "us-east-1", - "version": "6.5.1", - "elasticsearch": []interface{}{map[string]interface{}{}}, - }, - }) - type args struct { - ctx context.Context - d *schema.ResourceData - m interface{} - } - tests := []struct { - name string - args args - want map[string]string - err error - }{ - { - name: "succeeds with an importable version", - args: args{ - d: deploymentWithImportableVersion, - m: api.NewMock(mock.New200Response(mock.NewStructBody(models.DeploymentGetResponse{ - Resources: &models.DeploymentResources{Elasticsearch: 
[]*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "7.9.2", - }, - }, - }, - }, - }, - }, - }}, - }))), - }, - want: map[string]string{ - "id": "320b7b540dfc967a7a649c18e2fce4ed", - - "name": "my_deployment_name", - "region": "us-east-1", - "version": "7.9.2", - "deployment_template_id": "aws-cross-cluster-search-v2", - - "elasticsearch.#": "1", - "elasticsearch.0.autoscale": "", - "elasticsearch.0.cloud_id": "", - "elasticsearch.0.snapshot_source.#": "0", - "elasticsearch.0.config.#": "0", - "elasticsearch.0.extension.#": "0", - "elasticsearch.0.http_endpoint": "", - "elasticsearch.0.https_endpoint": "", - "elasticsearch.0.ref_id": "main-elasticsearch", - "elasticsearch.0.region": "", - "elasticsearch.0.remote_cluster.#": "0", - "elasticsearch.0.resource_id": "", - "elasticsearch.0.topology.#": "0", - "elasticsearch.0.trust_account.#": "0", - "elasticsearch.0.trust_external.#": "0", - "elasticsearch.0.strategy.#": "0", - }, - }, - { - name: "fails with a non importable version (5.6.1)", - args: args{ - d: deploymentWithNonImportableVersion, - m: api.NewMock(mock.New200Response(mock.NewStructBody(models.DeploymentGetResponse{ - Resources: &models.DeploymentResources{Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "5.6.1", - }, - }, - }, - }, - }, - }, - }}, - }))), - }, - err: errors.New(`invalid deployment version "5.6.1": minimum supported version is "6.6.0"`), - want: map[string]string{ - "id": "320b7b540dfc967a7a649c18e2fce4ed", - - "name": "my_deployment_name", - "region": 
"us-east-1", - "version": "5.6.1", - "deployment_template_id": "aws-cross-cluster-search-v2", - - "elasticsearch.#": "1", - "elasticsearch.0.autoscale": "", - "elasticsearch.0.cloud_id": "", - "elasticsearch.0.snapshot_source.#": "0", - "elasticsearch.0.config.#": "0", - "elasticsearch.0.extension.#": "0", - "elasticsearch.0.http_endpoint": "", - "elasticsearch.0.https_endpoint": "", - "elasticsearch.0.ref_id": "main-elasticsearch", - "elasticsearch.0.region": "", - "elasticsearch.0.remote_cluster.#": "0", - "elasticsearch.0.resource_id": "", - "elasticsearch.0.topology.#": "0", - "elasticsearch.0.trust_account.#": "0", - "elasticsearch.0.trust_external.#": "0", - "elasticsearch.0.strategy.#": "0", - }, - }, - { - name: "fails with a non importable version (6.5.1)", - args: args{ - d: deploymentWithNonImportableVersionSix, - m: api.NewMock(mock.New200Response(mock.NewStructBody(models.DeploymentGetResponse{ - Resources: &models.DeploymentResources{Elasticsearch: []*models.ElasticsearchResourceInfo{ - { - Info: &models.ElasticsearchClusterInfo{ - PlanInfo: &models.ElasticsearchClusterPlansInfo{ - Current: &models.ElasticsearchClusterPlanInfo{ - Plan: &models.ElasticsearchClusterPlan{ - Elasticsearch: &models.ElasticsearchConfiguration{ - Version: "6.5.1", - }, - }, - }, - }, - }, - }, - }}, - }))), - }, - err: errors.New(`invalid deployment version "6.5.1": minimum supported version is "6.6.0"`), - want: map[string]string{ - "id": "320b7b540dfc967a7a649c18e2fce4ed", - - "name": "my_deployment_name", - "region": "us-east-1", - "version": "6.5.1", - "deployment_template_id": "aws-cross-cluster-search-v2", - - "elasticsearch.#": "1", - "elasticsearch.0.autoscale": "", - "elasticsearch.0.cloud_id": "", - "elasticsearch.0.snapshot_source.#": "0", - "elasticsearch.0.config.#": "0", - "elasticsearch.0.extension.#": "0", - "elasticsearch.0.http_endpoint": "", - "elasticsearch.0.https_endpoint": "", - "elasticsearch.0.ref_id": "main-elasticsearch", - 
"elasticsearch.0.region": "", - "elasticsearch.0.remote_cluster.#": "0", - "elasticsearch.0.resource_id": "", - "elasticsearch.0.topology.#": "0", - "elasticsearch.0.trust_account.#": "0", - "elasticsearch.0.trust_external.#": "0", - "elasticsearch.0.strategy.#": "0", - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := importFunc(tt.args.ctx, tt.args.d, tt.args.m) - if tt.err != nil { - if !assert.EqualError(t, err, tt.err.Error()) { - t.Error(err) - } - } else { - assert.NoError(t, err) - } - - assert.Equal(t, tt.want, tt.args.d.State().Attributes) - }) - } -} diff --git a/ec/ecresource/deploymentresource/integrations_server_expanders.go b/ec/ecresource/deploymentresource/integrations_server_expanders.go deleted file mode 100644 index de9718aa8..000000000 --- a/ec/ecresource/deploymentresource/integrations_server_expanders.go +++ /dev/null @@ -1,213 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// expandIntegrationsServerResources expands IntegrationsServer resources into their models. -func expandIntegrationsServerResources(integrationsServers []interface{}, tpl *models.IntegrationsServerPayload) ([]*models.IntegrationsServerPayload, error) { - if len(integrationsServers) == 0 { - return nil, nil - } - - if tpl == nil { - return nil, errors.New("IntegrationsServer specified but deployment template is not configured for it. Use a different template if you wish to add IntegrationsServer") - } - - result := make([]*models.IntegrationsServerPayload, 0, len(integrationsServers)) - for _, raw := range integrationsServers { - resResource, err := expandIntegrationsServerResource(raw, tpl) - if err != nil { - return nil, err - } - result = append(result, resResource) - } - - return result, nil -} - -func expandIntegrationsServerResource(raw interface{}, res *models.IntegrationsServerPayload) (*models.IntegrationsServerPayload, error) { - var integrationsServer = raw.(map[string]interface{}) - - if esRefID, ok := integrationsServer["elasticsearch_cluster_ref_id"].(string); ok { - res.ElasticsearchClusterRefID = ec.String(esRefID) - } - - if refID, ok := integrationsServer["ref_id"].(string); ok { - res.RefID = ec.String(refID) - } - - if region, ok := integrationsServer["region"].(string); ok && region != "" { - res.Region = ec.String(region) - } - - if cfg, ok := integrationsServer["config"].([]interface{}); ok { - if err := expandIntegrationsServerConfig(cfg, res.Plan.IntegrationsServer); err != nil { - return nil, err - } - } - - if rt, ok := integrationsServer["topology"].([]interface{}); ok && len(rt) > 0 { - topology, err := expandIntegrationsServerTopology(rt, res.Plan.ClusterTopology) - if err != nil { - return 
nil, err - } - res.Plan.ClusterTopology = topology - } else { - res.Plan.ClusterTopology = defaultIntegrationsServerTopology(res.Plan.ClusterTopology) - } - - return res, nil -} - -func expandIntegrationsServerTopology(rawTopologies []interface{}, topologies []*models.IntegrationsServerTopologyElement) ([]*models.IntegrationsServerTopologyElement, error) { - res := make([]*models.IntegrationsServerTopologyElement, 0, len(rawTopologies)) - - for i, rawTop := range rawTopologies { - topology, ok := rawTop.(map[string]interface{}) - if !ok { - continue - } - - var icID string - if id, ok := topology["instance_configuration_id"].(string); ok { - icID = id - } - // When a topology element is set but no instance_configuration_id - // is set, then obtain the instance_configuration_id from the topology - // element. - if t := defaultIntegrationsServerTopology(topologies); icID == "" && len(t) > i { - icID = t[i].InstanceConfigurationID - } - - size, err := util.ParseTopologySize(topology) - if err != nil { - return nil, err - } - - elem, err := matchIntegrationsServerTopology(icID, topologies) - if err != nil { - return nil, err - } - if size != nil { - elem.Size = size - } - - if zones, ok := topology["zone_count"].(int); ok && zones > 0 { - elem.ZoneCount = int32(zones) - } - - res = append(res, elem) - } - - return res, nil -} - -func expandIntegrationsServerConfig(raw []interface{}, res *models.IntegrationsServerConfiguration) error { - for _, rawCfg := range raw { - cfg, ok := rawCfg.(map[string]interface{}) - if !ok { - continue - } - - if debugEnabled, ok := cfg["debug_enabled"].(bool); ok { - if res.SystemSettings == nil { - res.SystemSettings = &models.IntegrationsServerSystemSettings{} - } - res.SystemSettings.DebugEnabled = ec.Bool(debugEnabled) - } - - if settings, ok := cfg["user_settings_json"].(string); ok && settings != "" { - if err := json.Unmarshal([]byte(settings), &res.UserSettingsJSON); err != nil { - return fmt.Errorf("failed expanding 
IntegrationsServer user_settings_json: %w", err) - } - } - if settings, ok := cfg["user_settings_override_json"].(string); ok && settings != "" { - if err := json.Unmarshal([]byte(settings), &res.UserSettingsOverrideJSON); err != nil { - return fmt.Errorf("failed expanding IntegrationsServer user_settings_override_json: %w", err) - } - } - if settings, ok := cfg["user_settings_yaml"].(string); ok && settings != "" { - res.UserSettingsYaml = settings - } - if settings, ok := cfg["user_settings_override_yaml"].(string); ok && settings != "" { - res.UserSettingsOverrideYaml = settings - } - - if v, ok := cfg["docker_image"].(string); ok { - res.DockerImage = v - } - } - - return nil -} - -// defaultIntegrationsServerTopology iterates over all the templated topology elements and -// sets the size to the default when the template size is smaller than the -// deployment template default, the same is done on the ZoneCount. -func defaultIntegrationsServerTopology(topology []*models.IntegrationsServerTopologyElement) []*models.IntegrationsServerTopologyElement { - for _, t := range topology { - if *t.Size.Value < minimumIntegrationsServerSize { - t.Size.Value = ec.Int32(minimumIntegrationsServerSize) - } - if t.ZoneCount < minimumZoneCount { - t.ZoneCount = minimumZoneCount - } - } - - return topology -} - -func matchIntegrationsServerTopology(id string, topologies []*models.IntegrationsServerTopologyElement) (*models.IntegrationsServerTopologyElement, error) { - for _, t := range topologies { - if t.InstanceConfigurationID == id { - return t, nil - } - } - return nil, fmt.Errorf( - `IntegrationsServer topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, - id, - ) -} - -// IntegrationsServerResource returns the IntegrationsServerPayload from a deployment -// template or an empty version of the payload. 
-func integrationsServerResource(res *models.DeploymentTemplateInfoV2) *models.IntegrationsServerPayload { - if len(res.DeploymentTemplate.Resources.IntegrationsServer) == 0 { - return nil - } - return res.DeploymentTemplate.Resources.IntegrationsServer[0] -} - -// integrationsServerResourceFromUpdate returns the IntegrationsServerPayload from a deployment -// update request or an empty version of the payload. -func integrationsServerResourceFromUpdate(res *models.DeploymentUpdateResources) *models.IntegrationsServerPayload { - if len(res.IntegrationsServer) == 0 { - return nil - } - return res.IntegrationsServer[0] -} diff --git a/ec/ecresource/deploymentresource/integrations_server_expanders_test.go b/ec/ecresource/deploymentresource/integrations_server_expanders_test.go deleted file mode 100644 index 263189ed3..000000000 --- a/ec/ecresource/deploymentresource/integrations_server_expanders_test.go +++ /dev/null @@ -1,332 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "errors" - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" -) - -func Test_expandIntegrationsServerResources(t *testing.T) { - tplPath := "testdata/template-ece-3.0.0-default.json" - tpl := func() *models.IntegrationsServerPayload { - return integrationsServerResource(parseDeploymentTemplate(t, - tplPath, - )) - } - type args struct { - ess []interface{} - tpl *models.IntegrationsServerPayload - } - tests := []struct { - name string - args args - want []*models.IntegrationsServerPayload - err error - }{ - { - name: "returns nil when there's no resources", - }, - { - name: "parses an Integrations Server resource with explicit topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "integrations.server", - "size": "2g", - "size_resource": "memory", - "zone_count": 1, - }}, - }, - }, - }, - want: []*models.IntegrationsServerPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-integrations_server"), - Plan: &models.IntegrationsServerPlan{ - IntegrationsServer: &models.IntegrationsServerConfiguration{}, - ClusterTopology: []*models.IntegrationsServerTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "integrations.server", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }}, - }, - }, - }, - }, - { - name: "parses an Integrations Server resource with invalid instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": 
"main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "so invalid", - "size": "2g", - "size_resource": "memory", - "zone_count": 1, - }}, - }, - }, - }, - err: errors.New(`IntegrationsServer topology: invalid instance_configuration_id: "so invalid" doesn't match any of the deployment template instance configurations`), - }, - { - name: "parses an Integrations Server resource with no topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - }, - }, - }, - want: []*models.IntegrationsServerPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-integrations_server"), - Plan: &models.IntegrationsServerPlan{ - IntegrationsServer: &models.IntegrationsServerConfiguration{}, - ClusterTopology: []*models.IntegrationsServerTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "integrations.server", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }}, - }, - }, - }, - }, - { - name: "parses an Integrations Server resource with a topology element but no instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - "size_resource": "memory", - }}, - }, - }, - }, - want: []*models.IntegrationsServerPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-integrations_server"), - 
Plan: &models.IntegrationsServerPlan{ - IntegrationsServer: &models.IntegrationsServerConfiguration{}, - ClusterTopology: []*models.IntegrationsServerTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "integrations.server", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }}, - }, - }, - }, - }, - { - name: "parses an Integrations Server resource with multiple topology elements but no instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{ - map[string]interface{}{ - "size": "2g", - "size_resource": "memory", - }, map[string]interface{}{ - "size": "2g", - "size_resource": "memory", - }, - }, - }, - }, - }, - err: errors.New("IntegrationsServer topology: invalid instance_configuration_id: \"\" doesn't match any of the deployment template instance configurations"), - }, - { - name: "parses an Integrations Server resource with explicit topology and some config", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-integrations_server", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\": \"value\"}", - "user_settings_override_json": "{\"some.setting\": \"value2\"}", - "debug_enabled": true, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "integrations.server", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - want: []*models.IntegrationsServerPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: 
ec.String("some-region"), - RefID: ec.String("tertiary-integrations_server"), - Plan: &models.IntegrationsServerPlan{ - IntegrationsServer: &models.IntegrationsServerConfiguration{ - UserSettingsYaml: `some.setting: value`, - UserSettingsOverrideYaml: `some.setting: value2`, - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "value2", - }, - SystemSettings: &models.IntegrationsServerSystemSettings{ - DebugEnabled: ec.Bool(true), - }, - }, - ClusterTopology: []*models.IntegrationsServerTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "integrations.server", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - }}, - }, - }}, - }, - { - name: "parses an Integrations Server resource with explicit nils", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-integrations_server", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": nil, - "user_settings_override_yaml": nil, - "user_settings_json": nil, - "user_settings_override_json": nil, - "debug_enabled": true, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "integrations.server", - "size": nil, - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - want: []*models.IntegrationsServerPayload{{ - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("tertiary-integrations_server"), - Plan: &models.IntegrationsServerPlan{ - IntegrationsServer: &models.IntegrationsServerConfiguration{ - SystemSettings: &models.IntegrationsServerSystemSettings{ - DebugEnabled: ec.Bool(true), - }, - }, - ClusterTopology: []*models.IntegrationsServerTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: 
"integrations.server", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }}, - }, - }}, - }, - { - name: "tries to parse an integrations_server resource when the template doesn't have an Integrations Server instance set.", - args: args{ - tpl: nil, - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-integrations_server", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "integrations.server", - "size": "4g", - "size_resource": "memory", - "zone_count": 1, - }}, - "config": []interface{}{map[string]interface{}{ - "debug_enabled": true, - }}, - }}, - }, - err: errors.New("IntegrationsServer specified but deployment template is not configured for it. Use a different template if you wish to add IntegrationsServer"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := expandIntegrationsServerResources(tt.args.ess, tt.args.tpl) - if !assert.Equal(t, tt.err, err) { - t.Error(err) - } - - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/integrations_server_flatteners.go b/ec/ecresource/deploymentresource/integrations_server_flatteners.go deleted file mode 100644 index 700c3c21f..000000000 --- a/ec/ecresource/deploymentresource/integrations_server_flatteners.go +++ /dev/null @@ -1,159 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "bytes" - "encoding/json" - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenIntegrationsServerResources flattens IntegrationsServer resources into its flattened structure. -func flattenIntegrationsServerResources(in []*models.IntegrationsServerResourceInfo, name string) []interface{} { - var result = make([]interface{}, 0, len(in)) - for _, res := range in { - var m = make(map[string]interface{}) - if util.IsCurrentIntegrationsServerPlanEmpty(res) || isIntegrationsServerResourceStopped(res) { - continue - } - - if res.RefID != nil && *res.RefID != "" { - m["ref_id"] = *res.RefID - } - - if res.Info.ID != nil && *res.Info.ID != "" { - m["resource_id"] = *res.Info.ID - } - - if res.Region != nil { - m["region"] = *res.Region - } - - plan := res.Info.PlanInfo.Current.Plan - if topology := flattenIntegrationsServerTopology(plan); len(topology) > 0 { - m["topology"] = topology - } - - if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID - } - - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } - - for _, url := range res.Info.Metadata.ServicesUrls { - m[fmt.Sprintf("%s_https_endpoint", *url.Service)] = *url.URL - } - - if cfg := flattenIntegrationsServerConfig(plan.IntegrationsServer); len(cfg) > 0 { - m["config"] = cfg - } - - result = append(result, m) - } - - return result -} - -func flattenIntegrationsServerTopology(plan 
*models.IntegrationsServerPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) - for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) - if topology.Size == nil || topology.Size.Value == nil || *topology.Size.Value == 0 { - continue - } - - if topology.InstanceConfigurationID != "" { - m["instance_configuration_id"] = topology.InstanceConfigurationID - } - - if topology.Size != nil { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource - } - - m["zone_count"] = topology.ZoneCount - - result = append(result, m) - } - - return result -} - -func flattenIntegrationsServerConfig(cfg *models.IntegrationsServerConfiguration) []interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.UserSettingsYaml != "" { - m["user_settings_yaml"] = cfg.UserSettingsYaml - } - - if cfg.UserSettingsOverrideYaml != "" { - m["user_settings_override_yaml"] = cfg.UserSettingsOverrideYaml - } - - if o := cfg.UserSettingsJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_json"] = string(b) - } - } - - if o := cfg.UserSettingsOverrideJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_override_json"] = string(b) - } - } - - if cfg.DockerImage != "" { - m["docker_image"] = cfg.DockerImage - } - - for k, v := range flattenIntegrationsServerSystemConfig(cfg.SystemSettings) { - m[k] = v - } - - if len(m) == 0 { - return nil - } - - return []interface{}{m} -} - -func flattenIntegrationsServerSystemConfig(cfg *models.IntegrationsServerSystemSettings) map[string]interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.DebugEnabled != nil { - m["debug_enabled"] = *cfg.DebugEnabled - } - - if len(m) == 0 { - return nil - } - - return m -} diff --git 
a/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server.go b/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server.go new file mode 100644 index 000000000..f0a5b62cc --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server.go @@ -0,0 +1,47 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type IntegrationsServerTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.List `tfsdk:"topology"` + Config types.List `tfsdk:"config"` +} + +type IntegrationsServer struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology topologyv1.Topologies `tfsdk:"topology"` + Config IntegrationsServerConfigs `tfsdk:"config"` +} + +type IntegrationsServers []IntegrationsServer diff --git a/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server_config.go b/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server_config.go new file mode 100644 index 000000000..821f53fe0 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v1/integrations_server_config.go @@ -0,0 +1,42 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type IntegrationsServerConfigTF struct { + DockerImage types.String `tfsdk:"docker_image"` + DebugEnabled types.Bool `tfsdk:"debug_enabled"` + UserSettingsJson types.String `tfsdk:"user_settings_json"` + UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"` + UserSettingsYaml types.String `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"` +} + +type IntegrationsServerConfig struct { + DockerImage *string `tfsdk:"docker_image"` + DebugEnabled *bool `tfsdk:"debug_enabled"` + UserSettingsJson *string `tfsdk:"user_settings_json"` + UserSettingsOverrideJson *string `tfsdk:"user_settings_override_json"` + UserSettingsYaml *string `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml *string `tfsdk:"user_settings_override_yaml"` +} + +type IntegrationsServerConfigs []IntegrationsServerConfig diff --git a/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go b/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go new file mode 100644 index 000000000..69b3285ee --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v1/schema.go @@ -0,0 +1,165 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func IntegrationsServerSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Integrations Server resource definition", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + }, + }, + "ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-integrations_server"}), + }, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": 
{ + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "topology": { + Description: "Optional topology attribute", + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + }, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + }), + }, + "config": { + Description: `Optionally define the IntegrationsServer configuration options for the IntegrationsServer Server`, + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the IntegrationsServer nodes will use. 
Note that this field will only work for internal users only.", + Optional: true, + }, + // IntegrationsServer System Settings + "debug_enabled": { + Type: types.BoolType, + Description: `Optionally enable debug mode for IntegrationsServer servers - defaults to false`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), + }, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_config.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_config.go new file mode 100644 index 000000000..636ee9fd1 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_config.go @@ -0,0 +1,124 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v1" + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +type IntegrationsServerConfig v1.IntegrationsServerConfig + +func readIntegrationsServerConfigs(in *models.IntegrationsServerConfiguration) (*IntegrationsServerConfig, error) { + var cfg IntegrationsServerConfig + + if in.UserSettingsYaml != "" { + cfg.UserSettingsYaml = &in.UserSettingsYaml + } + + if in.UserSettingsOverrideYaml != "" { + cfg.UserSettingsOverrideYaml = &in.UserSettingsOverrideYaml + } + + if o := in.UserSettingsJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsJson = ec.String(string(b)) + } + } + + if o := in.UserSettingsOverrideJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsOverrideJson = ec.String(string(b)) + } + } + + if in.DockerImage != "" { + cfg.DockerImage = &in.DockerImage + } + + if in.SystemSettings != nil { + if in.SystemSettings.DebugEnabled != nil { + cfg.DebugEnabled = in.SystemSettings.DebugEnabled + } + } + + if cfg == (IntegrationsServerConfig{}) { + return nil, nil + } + + return &cfg, nil +} + +func integrationsServerConfigPayload(ctx context.Context, cfgObj attr.Value, res *models.IntegrationsServerConfiguration) diag.Diagnostics { + var diags diag.Diagnostics + + if cfgObj.IsNull() || cfgObj.IsUnknown() { + return nil + } + + var cfg *v1.IntegrationsServerConfigTF + + if diags = tfsdk.ValueAs(ctx, cfgObj, &cfg); diags.HasError() { + return nil + } + + if cfg == nil { + return nil + } + + if !cfg.DebugEnabled.IsNull() { + if res.SystemSettings == nil { + 
res.SystemSettings = &models.IntegrationsServerSystemSettings{} + } + res.SystemSettings.DebugEnabled = &cfg.DebugEnabled.Value + } + + if cfg.UserSettingsJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsJson.Value), &res.UserSettingsJSON); err != nil { + diags.AddError("failed expanding IntegrationsServer user_settings_json", err.Error()) + } + } + + if cfg.UserSettingsOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsOverrideJson.Value), &res.UserSettingsOverrideJSON); err != nil { + diags.AddError("failed expanding IntegrationsServer user_settings_override_json", err.Error()) + } + } + + if !cfg.UserSettingsYaml.IsNull() { + res.UserSettingsYaml = cfg.UserSettingsYaml.Value + } + + if !cfg.UserSettingsOverrideYaml.IsNull() { + res.UserSettingsOverrideYaml = cfg.UserSettingsOverrideYaml.Value + } + + if !cfg.DockerImage.IsNull() { + res.DockerImage = cfg.DockerImage.Value + } + + return diags +} diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload.go new file mode 100644 index 000000000..67833eb15 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload.go @@ -0,0 +1,116 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type IntegrationsServerTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + Config types.Object `tfsdk:"config"` +} + +func (srv IntegrationsServerTF) payload(ctx context.Context, payload models.IntegrationsServerPayload) (*models.IntegrationsServerPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + if !srv.ElasticsearchClusterRefId.IsNull() { + payload.ElasticsearchClusterRefID = &srv.ElasticsearchClusterRefId.Value + } + + if !srv.RefId.IsNull() { + payload.RefID = &srv.RefId.Value + } + + if srv.Region.Value != "" { + payload.Region = &srv.Region.Value + } + + ds := integrationsServerConfigPayload(ctx, srv.Config, payload.Plan.IntegrationsServer) + diags.Append(ds...) 
+ + topologyTF := topologyv1.TopologyTF{ + InstanceConfigurationId: srv.InstanceConfigurationId, + Size: srv.Size, + SizeResource: srv.SizeResource, + ZoneCount: srv.ZoneCount, + } + + topologyPayload, ds := integrationsServerTopologyPayload(ctx, topologyTF, defaultIntegrationsServerTopology(payload.Plan.ClusterTopology), 0) + + diags.Append(ds...) + + if !ds.HasError() && topologyPayload != nil { + payload.Plan.ClusterTopology = []*models.IntegrationsServerTopologyElement{topologyPayload} + } + + return &payload, diags +} + +func IntegrationsServerPayload(ctx context.Context, srvObj types.Object, template *models.DeploymentTemplateInfoV2) (*models.IntegrationsServerPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + var srv *IntegrationsServerTF + + if diags = tfsdk.ValueAs(ctx, srvObj, &srv); diags.HasError() { + return nil, diags + } + + if srv == nil { + return nil, nil + } + + templatePayload := payloadFromTemplate(template) + + if templatePayload == nil { + diags.AddError("integrations_server payload error", "integrations_server specified but deployment template is not configured for it. Use a different template if you wish to add integrations_server") + return nil, diags + } + + payload, diags := srv.payload(ctx, *templatePayload) + + if diags.HasError() { + return nil, diags + } + + return payload, nil +} + +// payloadFromTemplate returns the IntegrationsServerPayload from a deployment +// template or an empty version of the payload. 
+func payloadFromTemplate(template *models.DeploymentTemplateInfoV2) *models.IntegrationsServerPayload { + if template == nil || len(template.DeploymentTemplate.Resources.IntegrationsServer) == 0 { + return nil + } + return template.DeploymentTemplate.Resources.IntegrationsServer[0] +} diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload_test.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload_test.go new file mode 100644 index 000000000..baa001e2d --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_payload_test.go @@ -0,0 +1,254 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" +) + +func Test_IntegrationsServerPayload(t *testing.T) { + tplPath := "../../testdata/template-ece-3.0.0-default.json" + tpl := func() *models.DeploymentTemplateInfoV2 { + return testutil.ParseDeploymentTemplate(t, tplPath) + } + type args struct { + srv *IntegrationsServer + tpl *models.DeploymentTemplateInfoV2 + } + tests := []struct { + name string + args args + want *models.IntegrationsServerPayload + diags diag.Diagnostics + }{ + { + name: "returns nil when there's no resources", + }, + { + name: "parses an Integrations Server resource with explicit topology", + args: args{ + tpl: tpl(), + srv: &IntegrationsServer{ + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("integrations.server"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + want: &models.IntegrationsServerPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-integrations_server"), + Plan: &models.IntegrationsServerPlan{ + IntegrationsServer: &models.IntegrationsServerConfiguration{}, + ClusterTopology: []*models.IntegrationsServerTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "integrations.server", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }}, + }, + }, + }, + { + name: "parses 
an Integrations Server resource with invalid instance_configuration_id", + args: args{ + tpl: tpl(), + srv: &IntegrationsServer{ + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("invalid"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("integrations_server topology payload error", `invalid instance_configuration_id: "invalid" doesn't match any of the deployment template instance configurations`) + return diags + }(), + }, + { + name: "parses an Integrations Server resource with no topology", + args: args{ + tpl: tpl(), + srv: &IntegrationsServer{ + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + }, + }, + want: &models.IntegrationsServerPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-integrations_server"), + Plan: &models.IntegrationsServerPlan{ + IntegrationsServer: &models.IntegrationsServerConfiguration{}, + ClusterTopology: []*models.IntegrationsServerTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "integrations.server", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }}, + }, + }, + }, + { + name: "parses an Integrations Server resource with a topology element but no instance_configuration_id", + args: args{ + tpl: tpl(), + srv: &IntegrationsServer{ + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Size: ec.String("2g"), + SizeResource: ec.String("memory"), + }, + }, + want: 
&models.IntegrationsServerPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-integrations_server"), + Plan: &models.IntegrationsServerPlan{ + IntegrationsServer: &models.IntegrationsServerConfiguration{}, + ClusterTopology: []*models.IntegrationsServerTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "integrations.server", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }}, + }, + }, + }, + { + name: "parses an Integrations Server resource with explicit topology and some config", + args: args{ + tpl: tpl(), + srv: &IntegrationsServer{ + RefId: ec.String("tertiary-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Config: &IntegrationsServerConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\": \"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\": \"value2\"}"), + DebugEnabled: ec.Bool(true), + }, + InstanceConfigurationId: ec.String("integrations.server"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + want: &models.IntegrationsServerPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("tertiary-integrations_server"), + Plan: &models.IntegrationsServerPlan{ + IntegrationsServer: &models.IntegrationsServerConfiguration{ + UserSettingsYaml: `some.setting: value`, + UserSettingsOverrideYaml: `some.setting: value2`, + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "value2", + }, + SystemSettings: &models.IntegrationsServerSystemSettings{ + DebugEnabled: ec.Bool(true), + }, + }, + ClusterTopology: 
[]*models.IntegrationsServerTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "integrations.server", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + }}, + }, + }, + }, + { + name: "tries to parse an integrations_server resource when the template doesn't have an Integrations Server instance set.", + args: args{ + tpl: nil, + srv: &IntegrationsServer{ + RefId: ec.String("tertiary-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Config: &IntegrationsServerConfig{ + DebugEnabled: ec.Bool(true), + }, + InstanceConfigurationId: ec.String("integrations.server"), + Size: ec.String("4g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("integrations_server payload error", "integrations_server specified but deployment template is not configured for it. Use a different template if you wish to add integrations_server") + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var srv types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.srv, IntegrationsServerSchema().FrameworkType(), &srv) + assert.Nil(t, diags) + + if got, diags := IntegrationsServerPayload(context.Background(), srv, tt.args.tpl); tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.Equal(t, tt.want, got) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read.go new file mode 100644 index 000000000..ac9c698d9 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +type IntegrationsServer struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + Config *IntegrationsServerConfig `tfsdk:"config"` +} + +func ReadIntegrationsServers(in []*models.IntegrationsServerResourceInfo) (*IntegrationsServer, error) { + for _, model := range in { + if util.IsCurrentIntegrationsServerPlanEmpty(model) || IsIntegrationsServerStopped(model) { + continue + } + + srv, err := readIntegrationsServer(model) + if err != nil { + return nil, err + } + + return srv, nil + } + + return nil, nil +} + +func readIntegrationsServer(in *models.IntegrationsServerResourceInfo) (*IntegrationsServer, error) { + + var srv IntegrationsServer + + 
srv.RefId = in.RefID + + srv.ResourceId = in.Info.ID + + srv.Region = in.Region + + plan := in.Info.PlanInfo.Current.Plan + + topologies, err := readIntegrationsServerTopologies(plan.ClusterTopology) + + if err != nil { + return nil, err + } + + if len(topologies) > 0 { + srv.InstanceConfigurationId = topologies[0].InstanceConfigurationId + srv.Size = topologies[0].Size + srv.SizeResource = topologies[0].SizeResource + srv.ZoneCount = topologies[0].ZoneCount + } + + srv.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + srv.HttpEndpoint, srv.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + cfg, err := readIntegrationsServerConfigs(plan.IntegrationsServer) + + if err != nil { + return nil, err + } + + srv.Config = cfg + + return &srv, nil +} + +// IsIntegrationsServerStopped returns true if the resource is stopped. +func IsIntegrationsServerStopped(res *models.IntegrationsServerResourceInfo) bool { + return res == nil || res.Info == nil || res.Info.Status == nil || + *res.Info.Status == "stopped" +} diff --git a/ec/ecresource/deploymentresource/integrations_server_flatteners_test.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go similarity index 70% rename from ec/ecresource/deploymentresource/integrations_server_flatteners_test.go rename to ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go index 9d751cf75..3c4711f05 100644 --- a/ec/ecresource/deploymentresource/integrations_server_flatteners_test.go +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_read_test.go @@ -15,31 +15,34 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) -func Test_flattenIntegrationsServerResource(t *testing.T) { +func Test_readIntegrationsServer(t *testing.T) { type args struct { - in []*models.IntegrationsServerResourceInfo - name string + in []*models.IntegrationsServerResourceInfo } tests := []struct { name string args args - want []interface{} + want *IntegrationsServer }{ { name: "empty resource list returns empty list", args: args{in: []*models.IntegrationsServerResourceInfo{}}, - want: []interface{}{}, + want: nil, }, { name: "empty current plan returns empty list", @@ -52,7 +55,7 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { }, }, }}, - want: []interface{}{}, + want: nil, }, { name: "parses the integrations_server resource", @@ -103,25 +106,17 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://integrations_serverresource.cloud.elastic.co:9200", - "https_endpoint": "https://integrations_serverresource.cloud.elastic.co:9243", - "fleet_https_endpoint": "https://fleet_endpoint.cloud.elastic.co", - "apm_https_endpoint": "https://apm_endpoint.cloud.elastic.co", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.integrations_server.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }, - }, - }, + want: &IntegrationsServer{ + ElasticsearchClusterRefId: 
ec.String("main-elasticsearch"), + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://integrations_serverresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://integrations_serverresource.cloud.elastic.co:9243"), + InstanceConfigurationId: ec.String("aws.integrations_server.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, }, }, { @@ -236,28 +231,24 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://integrations_serverresource.cloud.elastic.co:9200", - "https_endpoint": "https://integrations_serverresource.cloud.elastic.co:9243", - "fleet_https_endpoint": "https://fleet_endpoint.cloud.elastic.co", - "apm_https_endpoint": "https://apm_endpoint.cloud.elastic.co", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.integrations_server.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }}, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - }}, + want: &IntegrationsServer{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://integrations_serverresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://integrations_serverresource.cloud.elastic.co:9243"), + InstanceConfigurationId: 
ec.String("aws.integrations_server.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Config: &IntegrationsServerConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + }, + }, }, { name: "parses the integrations_server resource with config overrides and system settings", @@ -319,35 +310,67 @@ func Test_flattenIntegrationsServerResource(t *testing.T) { }, }, }}, - want: []interface{}{map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-integrations_server", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://integrations_serverresource.cloud.elastic.co:9200", - "https_endpoint": "https://integrations_serverresource.cloud.elastic.co:9243", - "fleet_https_endpoint": "https://fleet_endpoint.cloud.elastic.co", - "apm_https_endpoint": "https://apm_endpoint.cloud.elastic.co", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.integrations_server.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }}, - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", + want: &IntegrationsServer{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-integrations_server"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://integrations_serverresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://integrations_serverresource.cloud.elastic.co:9243"), + InstanceConfigurationId: 
ec.String("aws.integrations_server.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + Config: &IntegrationsServerConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: value2"), + UserSettingsJson: ec.String("{\"some.setting\":\"value\"}"), + UserSettingsOverrideJson: ec.String("{\"some.setting\":\"value2\"}"), + DebugEnabled: ec.Bool(true), + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + srv, err := ReadIntegrationsServers(tt.args.in) + assert.Nil(t, err) + assert.Equal(t, tt.want, srv) - "debug_enabled": true, - }}, - }}, + var obj types.Object + diags := tfsdk.ValueFrom(context.Background(), srv, IntegrationsServerSchema().FrameworkType(), &obj) + assert.Nil(t, diags) + }) + } +} + +func Test_IsIntegrationsServerStopped(t *testing.T) { + type args struct { + res *models.IntegrationsServerResourceInfo + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "started resource returns false", + args: args{res: &models.IntegrationsServerResourceInfo{Info: &models.IntegrationsServerInfo{ + Status: ec.String("started"), + }}}, + want: false, + }, + { + name: "stopped resource returns true", + args: args{res: &models.IntegrationsServerResourceInfo{Info: &models.IntegrationsServerInfo{ + Status: ec.String("stopped"), + }}}, + want: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenIntegrationsServerResources(tt.args.in, tt.args.name) + got := IsIntegrationsServerStopped(tt.args.res) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_topology.go b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_topology.go new file mode 100644 index 000000000..0be4acdf8 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/integrations_server_topology.go @@ -0,0 +1,139 @@ 
+// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" +) + +const ( + minimumIntegrationsServerSize = 1024 +) + +func integrationsServerTopologyPayload(ctx context.Context, topology topologyv1.TopologyTF, planModels []*models.IntegrationsServerTopologyElement, index int) (*models.IntegrationsServerTopologyElement, diag.Diagnostics) { + + icID := topology.InstanceConfigurationId.Value + + // When a topology element is set but no instance_configuration_id + // is set, then obtain the instance_configuration_id from the topology + // element. 
+ if icID == "" && index < len(planModels) { + icID = planModels[index].InstanceConfigurationID + } + + var diags diag.Diagnostics + + size, err := converters.ParseTopologySizeTypes(topology.Size, topology.SizeResource) + if err != nil { + diags.AddError("parse topology error", err.Error()) + return nil, diags + } + + elem, err := matchIntegrationsServerTopology(icID, planModels) + if err != nil { + diags.AddError("integrations_server topology payload error", err.Error()) + return nil, diags + } + + if size != nil { + elem.Size = size + } + + if topology.ZoneCount.Value > 0 { + elem.ZoneCount = int32(topology.ZoneCount.Value) + } + + return elem, nil +} + +func matchIntegrationsServerTopology(id string, topologies []*models.IntegrationsServerTopologyElement) (*models.IntegrationsServerTopologyElement, error) { + for _, t := range topologies { + if t.InstanceConfigurationID == id { + return t, nil + } + } + return nil, fmt.Errorf( + `invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, + id, + ) +} + +// defaultIntegrationsServerTopology iterates over all the templated topology elements and +// sets the size to the default when the template size is smaller than the +// deployment template default, the same is done on the ZoneCount. 
+func defaultIntegrationsServerTopology(topology []*models.IntegrationsServerTopologyElement) []*models.IntegrationsServerTopologyElement { + for _, t := range topology { + if *t.Size.Value < minimumIntegrationsServerSize { + t.Size.Value = ec.Int32(minimumIntegrationsServerSize) + } + if t.ZoneCount < utils.MinimumZoneCount { + t.ZoneCount = utils.MinimumZoneCount + } + } + + return topology +} + +func readIntegrationsServerTopologies(in []*models.IntegrationsServerTopologyElement) (topologyv1.Topologies, error) { + if len(in) == 0 { + return nil, nil + } + + tops := make(topologyv1.Topologies, 0, len(in)) + for _, model := range in { + if model.Size == nil || model.Size.Value == nil || *model.Size.Value == 0 { + continue + } + + top, err := readIntegrationsServerTopology(model) + if err != nil { + return nil, err + } + + tops = append(tops, *top) + } + + return tops, nil +} + +func readIntegrationsServerTopology(in *models.IntegrationsServerTopologyElement) (*topologyv1.Topology, error) { + var top topologyv1.Topology + + if in.InstanceConfigurationID != "" { + top.InstanceConfigurationId = &in.InstanceConfigurationID + } + + if in.Size != nil { + top.Size = ec.String(util.MemoryToState(*in.Size.Value)) + top.SizeResource = ec.String(*in.Size.Resource) + } + + top.ZoneCount = int(in.ZoneCount) + + return &top, nil +} diff --git a/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go b/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go new file mode 100644 index 000000000..9c258afc9 --- /dev/null +++ b/ec/ecresource/deploymentresource/integrationsserver/v2/schema.go @@ -0,0 +1,151 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func IntegrationsServerSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Integrations Server resource definition", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + }, + }, + "ref_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-integrations_server"}), + }, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + 
"https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + }, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "config": { + Description: `Optionally define the Integrations Server configuration options for the IntegrationsServer Server`, + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Integrations Server nodes will use. Note that this field will only work for internal users only.", + Optional: true, + }, + "debug_enabled": { + Type: types.BoolType, + Description: `Optionally enable debug mode for Integrations Server instances - defaults to false`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: false}), + }, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/kibana/v1/kibana.go b/ec/ecresource/deploymentresource/kibana/v1/kibana.go new file mode 100644 index 000000000..5b9314e98 --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v1/kibana.go @@ -0,0 +1,49 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type KibanaTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + Topology types.List `tfsdk:"topology"` + Config types.List `tfsdk:"config"` +} + +type Kibana struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + Topology topologyv1.Topologies `tfsdk:"topology"` + Config KibanaConfigs `tfsdk:"config"` +} + +type Kibanas []Kibana + +type KibanaTopologiesTF []*topologyv1.TopologyTF diff --git a/ec/ecresource/deploymentresource/kibana/v1/kibana_config.go b/ec/ecresource/deploymentresource/kibana/v1/kibana_config.go new file mode 100644 index 000000000..41ccb33fc --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v1/kibana_config.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type KibanaConfigTF struct { + DockerImage types.String `tfsdk:"docker_image"` + UserSettingsJson types.String `tfsdk:"user_settings_json"` + UserSettingsOverrideJson types.String `tfsdk:"user_settings_override_json"` + UserSettingsYaml types.String `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml types.String `tfsdk:"user_settings_override_yaml"` +} + +type KibanaConfig struct { + DockerImage *string `tfsdk:"docker_image"` + UserSettingsJson *string `tfsdk:"user_settings_json"` + UserSettingsOverrideJson *string `tfsdk:"user_settings_override_json"` + UserSettingsYaml *string `tfsdk:"user_settings_yaml"` + UserSettingsOverrideYaml *string `tfsdk:"user_settings_override_yaml"` +} + +type KibanaConfigs []KibanaConfig diff --git a/ec/ecresource/deploymentresource/kibana/v1/schema.go b/ec/ecresource/deploymentresource/kibana/v1/schema.go new file mode 100644 index 000000000..a28d447ff --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v1/schema.go @@ -0,0 +1,155 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. 
See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v1 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func KibanaSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Kibana resource definition", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + }, + Computed: true, + Optional: true, + }, + "ref_id": { + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-kibana"}), + }, + Computed: true, + Optional: true, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + 
PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "topology": { + Description: `Optional topology element`, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + }, + Computed: true, + Optional: true, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + }), + }, + "config": { + Optional: true, + Description: `Optionally define the Kibana configuration options for the Kibana Server`, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Kibana nodes will use. 
Note that this field will only work for internal users only.", + Optional: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_config.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_config.go new file mode 100644 index 000000000..bc2684ecc --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_config.go @@ -0,0 +1,98 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "bytes" + "encoding/json" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" +) + +type KibanaConfig v1.KibanaConfig + +func readKibanaConfig(in *models.KibanaConfiguration) (*KibanaConfig, error) { + var cfg KibanaConfig + + if in.UserSettingsYaml != "" { + cfg.UserSettingsYaml = &in.UserSettingsYaml + } + + if in.UserSettingsOverrideYaml != "" { + cfg.UserSettingsOverrideYaml = &in.UserSettingsOverrideYaml + } + + if o := in.UserSettingsJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsJson = ec.String(string(b)) + } + } + + if o := in.UserSettingsOverrideJSON; o != nil { + if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { + cfg.UserSettingsOverrideJson = ec.String(string(b)) + } + } + + if in.DockerImage != "" { + cfg.DockerImage = &in.DockerImage + } + + if cfg == (KibanaConfig{}) { + return nil, nil + } + + return &cfg, nil +} + +func kibanaConfigPayload(cfg *v1.KibanaConfigTF, model *models.KibanaConfiguration) diag.Diagnostics { + var diags diag.Diagnostics + + if cfg == nil { + return nil + } + + if cfg.UserSettingsJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsJson.Value), &model.UserSettingsJSON); err != nil { + diags.AddError("failed expanding kibana user_settings_json", err.Error()) + } + } + + if cfg.UserSettingsOverrideJson.Value != "" { + if err := json.Unmarshal([]byte(cfg.UserSettingsOverrideJson.Value), &model.UserSettingsOverrideJSON); err != nil { + diags.AddError("failed expanding kibana user_settings_override_json", err.Error()) + } + } + + if !cfg.UserSettingsYaml.IsNull() { + model.UserSettingsYaml = cfg.UserSettingsYaml.Value + } + + if !cfg.UserSettingsOverrideYaml.IsNull() { + model.UserSettingsOverrideYaml = 
cfg.UserSettingsOverrideYaml.Value + } + + if !cfg.DockerImage.IsNull() { + model.DockerImage = cfg.DockerImage.Value + } + + return diags +} diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_payload.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_payload.go new file mode 100644 index 000000000..f32177aea --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_payload.go @@ -0,0 +1,126 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v1" + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type KibanaTF struct { + ElasticsearchClusterRefId types.String `tfsdk:"elasticsearch_cluster_ref_id"` + RefId types.String `tfsdk:"ref_id"` + ResourceId types.String `tfsdk:"resource_id"` + Region types.String `tfsdk:"region"` + HttpEndpoint types.String `tfsdk:"http_endpoint"` + HttpsEndpoint types.String `tfsdk:"https_endpoint"` + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` + Config types.Object `tfsdk:"config"` +} + +func (kibana KibanaTF) payload(ctx context.Context, payload models.KibanaPayload) (*models.KibanaPayload, diag.Diagnostics) { + var diags diag.Diagnostics + + if !kibana.ElasticsearchClusterRefId.IsNull() { + payload.ElasticsearchClusterRefID = &kibana.ElasticsearchClusterRefId.Value + } + + if !kibana.RefId.IsNull() { + payload.RefID = &kibana.RefId.Value + } + + if kibana.Region.Value != "" { + payload.Region = &kibana.Region.Value + } + + if !kibana.Config.IsNull() && !kibana.Config.IsUnknown() { + var config *v1.KibanaConfigTF + + ds := tfsdk.ValueAs(ctx, kibana.Config, &config) + + diags.Append(ds...) + + if !ds.HasError() { + diags.Append(kibanaConfigPayload(config, payload.Plan.Kibana)...) 
+ } + } + + topologyTF := topologyv1.TopologyTF{ + InstanceConfigurationId: kibana.InstanceConfigurationId, + Size: kibana.Size, + SizeResource: kibana.SizeResource, + ZoneCount: kibana.ZoneCount, + } + + topologyPayload, ds := kibanaTopologyPayload(ctx, topologyTF, defaultKibanaTopology(payload.Plan.ClusterTopology), 0) + + diags.Append(ds...) + + if !ds.HasError() && topologyPayload != nil { + payload.Plan.ClusterTopology = []*models.KibanaClusterTopologyElement{topologyPayload} + } + + return &payload, diags +} + +func KibanaPayload(ctx context.Context, kibanaObj types.Object, template *models.DeploymentTemplateInfoV2) (*models.KibanaPayload, diag.Diagnostics) { + var kibanaTF *KibanaTF + + var diags diag.Diagnostics + + if diags = tfsdk.ValueAs(ctx, kibanaObj, &kibanaTF); diags.HasError() { + return nil, diags + } + + if kibanaTF == nil { + return nil, nil + } + + templatePlayload := payloadFromTemplate(template) + + if templatePlayload == nil { + diags.AddError("kibana payload error", "kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana") + return nil, diags + } + + payload, diags := kibanaTF.payload(ctx, *templatePlayload) + + if diags.HasError() { + return nil, diags + } + + return payload, nil +} + +// payloadFromTemplate returns the KibanaPayload from a deployment +// template or an empty version of the payload. +func payloadFromTemplate(res *models.DeploymentTemplateInfoV2) *models.KibanaPayload { + if res == nil || len(res.DeploymentTemplate.Resources.Kibana) == 0 { + return nil + } + return res.DeploymentTemplate.Resources.Kibana[0] +} diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_payload_test.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_payload_test.go new file mode 100644 index 000000000..5ce453f6c --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_payload_test.go @@ -0,0 +1,250 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/testutil" +) + +func Test_KibanaPayload(t *testing.T) { + tplPath := "../../testdata/template-aws-io-optimized-v2.json" + tpl := func() *models.DeploymentTemplateInfoV2 { + return testutil.ParseDeploymentTemplate(t, tplPath) + } + type args struct { + kibana *Kibana + tpl *models.DeploymentTemplateInfoV2 + } + tests := []struct { + name string + args args + want *models.KibanaPayload + diags diag.Diagnostics + }{ + { + name: "returns nil when there's no resources", + }, + { + name: "parses a kibana resource with topology", + args: args{ + tpl: tpl(), + kibana: &Kibana{ + RefId: ec.String("main-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + 
InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("2g"), + ZoneCount: 1, + }, + }, + want: &models.KibanaPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(2048), + }, + }, + }, + }, + }, + }, + { + name: "parses a kibana resource with incorrect instance_configuration_id", + args: args{ + tpl: tpl(), + kibana: &Kibana{ + RefId: ec.String("main-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("gcp.some.config"), + Size: ec.String("2g"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("kibana topology payload error", `kibana topology: invalid instance_configuration_id: "gcp.some.config" doesn't match any of the deployment template instance configurations`) + return diags + }(), + }, + { + name: "parses a kibana resource without topology", + args: args{ + tpl: tpl(), + kibana: &Kibana{ + RefId: ec.String("main-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + }, + }, + want: &models.KibanaPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(1024), + }, + }, + }, + }, + }, 
+ }, + { + name: "parses a kibana resource with a topology but no instance_configuration_id", + args: args{ + tpl: tpl(), + kibana: &Kibana{ + RefId: ec.String("main-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Size: ec.String("4g"), + }, + }, + want: &models.KibanaPayload{ + + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("main-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{}, + ClusterTopology: []*models.KibanaClusterTopologyElement{ + { + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + }, + }, + }, + }, + }, + { + name: "parses a kibana resource with topology and settings", + args: args{ + tpl: tpl(), + kibana: &Kibana{ + RefId: ec.String("secondary-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + Config: &KibanaConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: override"), + UserSettingsJson: ec.String(`{"some.setting":"value"}`), + UserSettingsOverrideJson: ec.String(`{"some.setting":"override"}`), + }, + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("4g"), + ZoneCount: 1, + }, + }, + want: &models.KibanaPayload{ + ElasticsearchClusterRefID: ec.String("somerefid"), + Region: ec.String("some-region"), + RefID: ec.String("secondary-kibana"), + Plan: &models.KibanaClusterPlan{ + Kibana: &models.KibanaConfiguration{ + UserSettingsYaml: "some.setting: value", + UserSettingsOverrideYaml: "some.setting: override", + UserSettingsJSON: map[string]interface{}{ + "some.setting": "value", + }, + UserSettingsOverrideJSON: map[string]interface{}{ + "some.setting": "override", + }, + }, + ClusterTopology: 
[]*models.KibanaClusterTopologyElement{{ + ZoneCount: 1, + InstanceConfigurationID: "aws.kibana.r5d", + Size: &models.TopologySize{ + Resource: ec.String("memory"), + Value: ec.Int32(4096), + }, + }}, + }, + }, + }, + { + name: "tries to parse an kibana resource when the template doesn't have a kibana instance set.", + args: args{ + tpl: nil, + kibana: &Kibana{ + RefId: ec.String("tertiary-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + ElasticsearchClusterRefId: ec.String("somerefid"), + InstanceConfigurationId: ec.String("aws.kibana.r5d"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("kibana payload error", "kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana") + return diags + }(), + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var kibana types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.kibana, KibanaSchema().FrameworkType(), &kibana) + assert.Nil(t, diags) + + if got, diags := KibanaPayload(context.Background(), kibana, tt.args.tpl); tt.diags != nil { + assert.Equal(t, tt.diags, diags) + } else { + assert.Nil(t, diags) + assert.Equal(t, tt.want, got) + } + }) + } +} diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_read.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_read.go new file mode 100644 index 000000000..3c5b47dd8 --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_read.go @@ -0,0 +1,99 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" +) + +type Kibana struct { + ElasticsearchClusterRefId *string `tfsdk:"elasticsearch_cluster_ref_id"` + RefId *string `tfsdk:"ref_id"` + ResourceId *string `tfsdk:"resource_id"` + Region *string `tfsdk:"region"` + HttpEndpoint *string `tfsdk:"http_endpoint"` + HttpsEndpoint *string `tfsdk:"https_endpoint"` + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` + Config *KibanaConfig `tfsdk:"config"` +} + +func ReadKibanas(in []*models.KibanaResourceInfo) (*Kibana, error) { + for _, model := range in { + if util.IsCurrentKibanaPlanEmpty(model) || IsKibanaStopped(model) { + continue + } + + kibana, err := readKibana(model) + if err != nil { + return nil, err + } + + return kibana, nil + } + + return nil, nil +} + +func readKibana(in *models.KibanaResourceInfo) (*Kibana, error) { + var kibana Kibana + + kibana.RefId = in.RefID + + kibana.ResourceId = in.Info.ClusterID + + kibana.Region = in.Region + + plan := in.Info.PlanInfo.Current.Plan + var err error + + topologies, err := readKibanaTopologies(plan.ClusterTopology) + if err != nil { + return nil, err + } + + if 
len(topologies) > 0 { + kibana.InstanceConfigurationId = topologies[0].InstanceConfigurationId + kibana.Size = topologies[0].Size + kibana.SizeResource = topologies[0].SizeResource + kibana.ZoneCount = topologies[0].ZoneCount + } + + kibana.ElasticsearchClusterRefId = in.ElasticsearchClusterRefID + + kibana.HttpEndpoint, kibana.HttpsEndpoint = converters.ExtractEndpoints(in.Info.Metadata) + + config, err := readKibanaConfig(plan.Kibana) + if err != nil { + return nil, err + } + + kibana.Config = config + + return &kibana, nil +} + +// IsKibanaStopped returns true if the resource is stopped. +func IsKibanaStopped(res *models.KibanaResourceInfo) bool { + return res == nil || res.Info == nil || res.Info.Status == nil || + *res.Info.Status == "stopped" +} diff --git a/ec/ecresource/deploymentresource/kibana_flatteners_test.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go similarity index 58% rename from ec/ecresource/deploymentresource/kibana_flatteners_test.go rename to ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go index 48b1a105a..8b44a474b 100644 --- a/ec/ecresource/deploymentresource/kibana_flatteners_test.go +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_read_test.go @@ -15,31 +15,34 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) -func Test_flattenKibanaResources(t *testing.T) { +func Test_ReadKibana(t *testing.T) { type args struct { - in []*models.KibanaResourceInfo - name string + in []*models.KibanaResourceInfo } tests := []struct { name string args args - want []interface{} + want *Kibana }{ { name: "empty resource list returns empty list", args: args{in: []*models.KibanaResourceInfo{}}, - want: []interface{}{}, + want: nil, }, { name: "empty current plan returns empty list", @@ -52,48 +55,11 @@ func Test_flattenKibanaResources(t *testing.T) { }, }, }}, - want: []interface{}{}, + want: nil, }, { name: "parses the kibana resource", args: args{in: []*models.KibanaResourceInfo{ - { - Region: ec.String("some-region"), - RefID: ec.String("main-kibana"), - ElasticsearchClusterRefID: ec.String("main-elasticsearch"), - Info: &models.KibanaClusterInfo{ - ClusterID: &mock.ValidClusterID, - ClusterName: ec.String("some-kibana-name"), - Region: "some-region", - Status: ec.String("started"), - Metadata: &models.ClusterMetadataInfo{ - Endpoint: "kibanaresource.cloud.elastic.co", - Ports: &models.ClusterMetadataPortInfo{ - HTTP: ec.Int32(9200), - HTTPS: ec.Int32(9243), - }, - }, - PlanInfo: &models.KibanaClusterPlansInfo{ - Current: &models.KibanaClusterPlanInfo{ - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - Version: "7.7.0", - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r4", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, 
- }, { Region: ec.String("some-region"), RefID: ec.String("main-kibana"), @@ -175,49 +141,66 @@ func Test_flattenKibanaResources(t *testing.T) { }, }, }}, - want: []interface{}{ - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://kibanaresource.cloud.elastic.co:9200", - "https_endpoint": "https://kibanaresource.cloud.elastic.co:9243", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }, - }, - }, - map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "http_endpoint": "http://kibanaresource.cloud.elastic.co:9200", - "https_endpoint": "https://kibanaresource.cloud.elastic.co:9243", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: override", - "user_settings_json": `{"some.setting":"value"}`, - "user_settings_override_json": `{"some.setting":"override"}`, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r4", - "size": "1g", - "size_resource": "memory", - "zone_count": int32(1), - }}, + want: &Kibana{ + ElasticsearchClusterRefId: ec.String("main-elasticsearch"), + RefId: ec.String("main-kibana"), + ResourceId: &mock.ValidClusterID, + Region: ec.String("some-region"), + HttpEndpoint: ec.String("http://kibanaresource.cloud.elastic.co:9200"), + HttpsEndpoint: ec.String("https://kibanaresource.cloud.elastic.co:9243"), + Config: &KibanaConfig{ + UserSettingsYaml: ec.String("some.setting: value"), + UserSettingsOverrideYaml: ec.String("some.setting: override"), + UserSettingsJson: ec.String(`{"some.setting":"value"}`), + 
UserSettingsOverrideJson: ec.String(`{"some.setting":"override"}`), }, + InstanceConfigurationId: ec.String("aws.kibana.r4"), + Size: ec.String("1g"), + SizeResource: ec.String("memory"), + ZoneCount: 1, }, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := flattenKibanaResources(tt.args.in, tt.args.name) + kibana, err := ReadKibanas(tt.args.in) + assert.Nil(t, err) + assert.Equal(t, tt.want, kibana) + + var obj types.Object + diags := tfsdk.ValueFrom(context.Background(), kibana, KibanaSchema().FrameworkType(), &obj) + assert.Nil(t, diags) + }) + } +} + +func Test_IsKibanaResourceStopped(t *testing.T) { + type args struct { + res *models.KibanaResourceInfo + } + tests := []struct { + name string + args args + want bool + }{ + { + name: "started resource returns false", + args: args{res: &models.KibanaResourceInfo{Info: &models.KibanaClusterInfo{ + Status: ec.String("started"), + }}}, + want: false, + }, + { + name: "stopped resource returns true", + args: args{res: &models.KibanaResourceInfo{Info: &models.KibanaClusterInfo{ + Status: ec.String("stopped"), + }}}, + want: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := IsKibanaStopped(tt.args.res) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/deploymentresource/kibana/v2/kibana_topology.go b/ec/ecresource/deploymentresource/kibana/v2/kibana_topology.go new file mode 100644 index 000000000..aeda2d978 --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v2/kibana_topology.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "fmt" + + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + topologyv1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/topology/v1" + "github.com/elastic/terraform-provider-ec/ec/internal/converters" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/utils" +) + +const ( + minimumKibanaSize = 1024 +) + +func readKibanaTopology(in *models.KibanaClusterTopologyElement) (*topologyv1.Topology, error) { + var top topologyv1.Topology + + if in.InstanceConfigurationID != "" { + top.InstanceConfigurationId = &in.InstanceConfigurationID + } + + if in.Size != nil { + top.Size = ec.String(util.MemoryToState(*in.Size.Value)) + top.SizeResource = ec.String(*in.Size.Resource) + } + + top.ZoneCount = int(in.ZoneCount) + + return &top, nil +} + +type KibanaTopologiesTF []*topologyv1.TopologyTF + +func readKibanaTopologies(in []*models.KibanaClusterTopologyElement) (topologyv1.Topologies, error) { + if len(in) == 0 { + return nil, nil + } + + tops := make(topologyv1.Topologies, 0, len(in)) + for _, model := range in { + if model.Size == nil || model.Size.Value == nil || *model.Size.Value == 0 { + continue + } + + top, err := readKibanaTopology(model) + if err != nil { + return nil, err + } + + tops = append(tops, 
*top) + } + + return tops, nil +} + +// defaultKibnaTopology iterates over all the templated topology elements and +// sets the size to the default when the template size is greater than the +// local terraform default, the same is done on the ZoneCount. +func defaultKibanaTopology(topology []*models.KibanaClusterTopologyElement) []*models.KibanaClusterTopologyElement { + for _, t := range topology { + if *t.Size.Value > minimumKibanaSize { + t.Size.Value = ec.Int32(minimumKibanaSize) + } + if t.ZoneCount > utils.MinimumZoneCount { + t.ZoneCount = utils.MinimumZoneCount + } + } + + return topology +} + +func kibanaTopologyPayload(ctx context.Context, topology v1.TopologyTF, planModels []*models.KibanaClusterTopologyElement, index int) (*models.KibanaClusterTopologyElement, diag.Diagnostics) { + + icID := topology.InstanceConfigurationId.Value + + // When a topology element is set but no instance_configuration_id + // is set, then obtain the instance_configuration_id from the topology + // element. 
+ if icID == "" && index < len(planModels) { + icID = planModels[index].InstanceConfigurationID + } + + size, err := converters.ParseTopologySizeTypes(topology.Size, topology.SizeResource) + + var diags diag.Diagnostics + if err != nil { + diags.AddError("size parsing error", err.Error()) + return nil, diags + } + + elem, err := matchKibanaTopology(icID, planModels) + if err != nil { + diags.AddError("kibana topology payload error", err.Error()) + return nil, diags + } + + if size != nil { + elem.Size = size + } + + if topology.ZoneCount.Value > 0 { + elem.ZoneCount = int32(topology.ZoneCount.Value) + } + + return elem, nil +} + +func matchKibanaTopology(id string, topologies []*models.KibanaClusterTopologyElement) (*models.KibanaClusterTopologyElement, error) { + for _, t := range topologies { + if t.InstanceConfigurationID == id { + return t, nil + } + } + return nil, fmt.Errorf( + `kibana topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, + id, + ) +} diff --git a/ec/ecresource/deploymentresource/kibana/v2/schema.go b/ec/ecresource/deploymentresource/kibana/v2/schema.go new file mode 100644 index 000000000..a9bcb885d --- /dev/null +++ b/ec/ecresource/deploymentresource/kibana/v2/schema.go @@ -0,0 +1,142 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. 
See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func KibanaSchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional Kibana resource definition", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "elasticsearch_cluster_ref_id": { + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-elasticsearch"}), + }, + Computed: true, + Optional: true, + }, + "ref_id": { + Type: types.StringType, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "main-kibana"}), + }, + Computed: true, + Optional: true, + }, + "resource_id": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "region": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "http_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "https_endpoint": { + Type: types.StringType, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "instance_configuration_id": { + Type: types.StringType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "size": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + 
"size_resource": { + Type: types.StringType, + Description: `Optional size type, defaults to "memory".`, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.String{Value: "memory"}), + }, + Computed: true, + Optional: true, + }, + "zone_count": { + Type: types.Int64Type, + Computed: true, + Optional: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + resource.UseStateForUnknown(), + }, + }, + "config": { + Optional: true, + Description: `Optionally define the Kibana configuration options for the Kibana Server`, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "docker_image": { + Type: types.StringType, + Description: "Optionally override the docker image the Kibana nodes will use. Note that this field will only work for internal users only.", + Optional: true, + }, + "user_settings_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_json": { + Type: types.StringType, + Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, + Optional: true, + }, + "user_settings_override_yaml": { + Type: types.StringType, + Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, + Optional: true, + }, + }), + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/kibana_expanders.go b/ec/ecresource/deploymentresource/kibana_expanders.go deleted file mode 100644 index 82dafc65e..000000000 --- a/ec/ecresource/deploymentresource/kibana_expanders.go +++ /dev/null @@ -1,203 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// expandKibanaResources expands the flattened kibana resources into its models. -func expandKibanaResources(kibanas []interface{}, tpl *models.KibanaPayload) ([]*models.KibanaPayload, error) { - if len(kibanas) == 0 { - return nil, nil - } - - if tpl == nil { - return nil, errors.New("kibana specified but deployment template is not configured for it. Use a different template if you wish to add kibana") - } - - result := make([]*models.KibanaPayload, 0, len(kibanas)) - for _, raw := range kibanas { - resResource, err := expandKibanaResource(raw, tpl) - if err != nil { - return nil, err - } - result = append(result, resResource) - } - - return result, nil -} - -func expandKibanaResource(raw interface{}, res *models.KibanaPayload) (*models.KibanaPayload, error) { - kibana := raw.(map[string]interface{}) - - if esRefID, ok := kibana["elasticsearch_cluster_ref_id"].(string); ok { - res.ElasticsearchClusterRefID = ec.String(esRefID) - } - - if refID, ok := kibana["ref_id"].(string); ok { - res.RefID = ec.String(refID) - } - - if region, ok := kibana["region"].(string); ok && region != "" { - res.Region = ec.String(region) - } - - if cfg, ok := kibana["config"]; ok { - if err := expandKibanaConfig(cfg, res.Plan.Kibana); err != nil { - return nil, err - } - } - - if rt, ok := kibana["topology"].([]interface{}); ok && len(rt) > 0 { - topology, err := expandKibanaTopology(rt, res.Plan.ClusterTopology) - if err != nil { - return nil, err - } - res.Plan.ClusterTopology = topology - } else { - res.Plan.ClusterTopology = defaultKibanaTopology(res.Plan.ClusterTopology) - } - - return res, nil -} - -func 
expandKibanaTopology(rawTopologies []interface{}, topologies []*models.KibanaClusterTopologyElement) ([]*models.KibanaClusterTopologyElement, error) { - var res = make([]*models.KibanaClusterTopologyElement, 0, len(rawTopologies)) - for i, rawTop := range rawTopologies { - var topology, ok = rawTop.(map[string]interface{}) - if !ok { - continue - } - - var icID string - if id, ok := topology["instance_configuration_id"].(string); ok { - icID = id - } - // When a topology element is set but no instance_configuration_id - // is set, then obtain the instance_configuration_id from the topology - // element. - if t := defaultKibanaTopology(topologies); icID == "" && len(t) > i { - icID = t[i].InstanceConfigurationID - } - size, err := util.ParseTopologySize(topology) - if err != nil { - return nil, err - } - - elem, err := matchKibanaTopology(icID, topologies) - if err != nil { - return nil, err - } - if size != nil { - elem.Size = size - } - - if zones, ok := topology["zone_count"].(int); ok && zones > 0 { - elem.ZoneCount = int32(zones) - } - - res = append(res, elem) - } - - return res, nil -} - -func expandKibanaConfig(raw interface{}, res *models.KibanaConfiguration) error { - for _, rawCfg := range raw.([]interface{}) { - cfg, ok := rawCfg.(map[string]interface{}) - if !ok { - continue - } - if settings, ok := cfg["user_settings_json"].(string); ok && settings != "" { - if err := json.Unmarshal([]byte(settings), &res.UserSettingsJSON); err != nil { - return fmt.Errorf("failed expanding kibana user_settings_json: %w", err) - } - } - if settings, ok := cfg["user_settings_override_json"].(string); ok && settings != "" { - if err := json.Unmarshal([]byte(settings), &res.UserSettingsOverrideJSON); err != nil { - return fmt.Errorf("failed expanding kibana user_settings_override_json: %w", err) - } - } - if settings, ok := cfg["user_settings_yaml"].(string); ok && settings != "" { - res.UserSettingsYaml = settings - } - if settings, ok := 
cfg["user_settings_override_yaml"].(string); ok && settings != "" { - res.UserSettingsOverrideYaml = settings - } - - if v, ok := cfg["docker_image"].(string); ok && v != "" { - res.DockerImage = v - } - } - - return nil -} - -// defaultApmTopology iterates over all the templated topology elements and -// sets the size to the default when the template size is greater than the -// local terraform default, the same is done on the ZoneCount. -func defaultKibanaTopology(topology []*models.KibanaClusterTopologyElement) []*models.KibanaClusterTopologyElement { - for _, t := range topology { - if *t.Size.Value > minimumKibanaSize { - t.Size.Value = ec.Int32(minimumKibanaSize) - } - if t.ZoneCount > minimumZoneCount { - t.ZoneCount = minimumZoneCount - } - } - - return topology -} - -func matchKibanaTopology(id string, topologies []*models.KibanaClusterTopologyElement) (*models.KibanaClusterTopologyElement, error) { - for _, t := range topologies { - if t.InstanceConfigurationID == id { - return t, nil - } - } - return nil, fmt.Errorf( - `kibana topology: invalid instance_configuration_id: "%s" doesn't match any of the deployment template instance configurations`, - id, - ) -} - -// kibanaResource returns the KibanaPayload from a deployment -// template or an empty version of the payload. -func kibanaResource(res *models.DeploymentTemplateInfoV2) *models.KibanaPayload { - if len(res.DeploymentTemplate.Resources.Kibana) == 0 { - return nil - } - return res.DeploymentTemplate.Resources.Kibana[0] -} - -// kibanaResourceFromUpdate returns the KibanaPayload from a deployment -// update request or an empty version of the payload. 
-func kibanaResourceFromUpdate(res *models.DeploymentUpdateResources) *models.KibanaPayload { - if len(res.Kibana) == 0 { - return nil - } - return res.Kibana[0] -} diff --git a/ec/ecresource/deploymentresource/kibana_expanders_test.go b/ec/ecresource/deploymentresource/kibana_expanders_test.go deleted file mode 100644 index d9b1328e5..000000000 --- a/ec/ecresource/deploymentresource/kibana_expanders_test.go +++ /dev/null @@ -1,321 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "errors" - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" -) - -func Test_expandKibanaResources(t *testing.T) { - tplPath := "testdata/template-aws-io-optimized-v2.json" - tpl := func() *models.KibanaPayload { - return kibanaResource(parseDeploymentTemplate(t, - tplPath, - )) - } - type args struct { - ess []interface{} - tpl *models.KibanaPayload - } - tests := []struct { - name string - args args - want []*models.KibanaPayload - err error - }{ - { - name: "returns nil when there's no resources", - }, - { - name: "parses a kibana resource with topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "2g", - "zone_count": 1, - }}, - }, - }, - }, - want: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(2048), - }, - }, - }, - }, - }, - }, - }, - { - name: "parses a kibana resource with explicit nils", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": nil, - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": nil, - 
"zone_count": 1, - }}, - }, - }, - }, - want: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("us-east-1"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - { - name: "parses a kibana resource with incorrect instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "gcp.some.config", - "size": "2g", - "zone_count": 1, - }}, - }, - }, - }, - err: errors.New(`kibana topology: invalid instance_configuration_id: "gcp.some.config" doesn't match any of the deployment template instance configurations`), - }, - { - name: "parses a kibana resource without topology", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - }, - }, - }, - want: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(1024), - }, - }, - }, - }, - }, - }, - }, - { - name: "parses a kibana resource with a topology but no instance_configuration_id", 
- args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{map[string]interface{}{ - "size": "4g", - }}, - }, - }, - }, - want: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("main-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{}, - ClusterTopology: []*models.KibanaClusterTopologyElement{ - { - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - }, - }, - }, - }, - }, - }, - { - name: "parses a kibana resource with multiple topologies but no instance_configuration_id", - args: args{ - tpl: tpl(), - ess: []interface{}{ - map[string]interface{}{ - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "elasticsearch_cluster_ref_id": "somerefid", - "topology": []interface{}{ - map[string]interface{}{ - "size": "4g", - }, map[string]interface{}{ - "size": "4g", - }, - }, - }, - }, - }, - err: errors.New("kibana topology: invalid instance_configuration_id: \"\" doesn't match any of the deployment template instance configurations"), - }, - { - name: "parses a kibana resource with topology and settings", - args: args{ - tpl: tpl(), - ess: []interface{}{map[string]interface{}{ - "ref_id": "secondary-kibana", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: override", - "user_settings_json": `{"some.setting":"value"}`, - "user_settings_override_json": `{"some.setting":"override"}`, - }}, - "topology": 
[]interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "4g", - "zone_count": 1, - }}, - }}, - }, - want: []*models.KibanaPayload{ - { - ElasticsearchClusterRefID: ec.String("somerefid"), - Region: ec.String("some-region"), - RefID: ec.String("secondary-kibana"), - Plan: &models.KibanaClusterPlan{ - Kibana: &models.KibanaConfiguration{ - UserSettingsYaml: "some.setting: value", - UserSettingsOverrideYaml: "some.setting: override", - UserSettingsJSON: map[string]interface{}{ - "some.setting": "value", - }, - UserSettingsOverrideJSON: map[string]interface{}{ - "some.setting": "override", - }, - }, - ClusterTopology: []*models.KibanaClusterTopologyElement{{ - ZoneCount: 1, - InstanceConfigurationID: "aws.kibana.r5d", - Size: &models.TopologySize{ - Resource: ec.String("memory"), - Value: ec.Int32(4096), - }, - }}, - }, - }, - }, - }, - { - name: "tries to parse an kibana resource when the template doesn't have a kibana instance set.", - args: args{ - tpl: nil, - ess: []interface{}{map[string]interface{}{ - "ref_id": "tertiary-kibana", - "elasticsearch_cluster_ref_id": "somerefid", - "resource_id": mock.ValidClusterID, - "region": "some-region", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "size_resource": "memory", - "zone_count": 1, - }}, - }}, - }, - err: errors.New("kibana specified but deployment template is not configured for it. 
Use a different template if you wish to add kibana"), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := expandKibanaResources(tt.args.ess, tt.args.tpl) - if !assert.Equal(t, tt.err, err) { - t.Error(err) - } - - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/kibana_flatteners.go b/ec/ecresource/deploymentresource/kibana_flatteners.go deleted file mode 100644 index fb9a2ff87..000000000 --- a/ec/ecresource/deploymentresource/kibana_flatteners.go +++ /dev/null @@ -1,134 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "bytes" - "encoding/json" - - "github.com/elastic/cloud-sdk-go/pkg/models" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenKibanaResources takes the kibana resource models and returns them flattened. 
-func flattenKibanaResources(in []*models.KibanaResourceInfo, name string) []interface{} { - result := make([]interface{}, 0, len(in)) - for _, res := range in { - m := make(map[string]interface{}) - if util.IsCurrentKibanaPlanEmpty(res) || isKibanaResourceStopped(res) { - continue - } - - if res.RefID != nil && *res.RefID != "" { - m["ref_id"] = *res.RefID - } - - if res.Info.ClusterID != nil && *res.Info.ClusterID != "" { - m["resource_id"] = *res.Info.ClusterID - } - - if res.Region != nil { - m["region"] = *res.Region - } - - plan := res.Info.PlanInfo.Current.Plan - if topology := flattenKibanaTopology(plan); len(topology) > 0 { - m["topology"] = topology - } - - if res.ElasticsearchClusterRefID != nil { - m["elasticsearch_cluster_ref_id"] = *res.ElasticsearchClusterRefID - } - - for k, v := range util.FlattenClusterEndpoint(res.Info.Metadata) { - m[k] = v - } - - if c := flattenKibanaConfig(plan.Kibana); len(c) > 0 { - m["config"] = c - } - - result = append(result, m) - } - - return result -} - -func flattenKibanaTopology(plan *models.KibanaClusterPlan) []interface{} { - var result = make([]interface{}, 0, len(plan.ClusterTopology)) - for _, topology := range plan.ClusterTopology { - var m = make(map[string]interface{}) - if topology.Size == nil || topology.Size.Value == nil || *topology.Size.Value == 0 { - continue - } - - if topology.InstanceConfigurationID != "" { - m["instance_configuration_id"] = topology.InstanceConfigurationID - } - - if topology.Size != nil { - m["size"] = util.MemoryToState(*topology.Size.Value) - m["size_resource"] = *topology.Size.Resource - - } - - m["zone_count"] = topology.ZoneCount - - result = append(result, m) - } - - return result -} - -func flattenKibanaConfig(cfg *models.KibanaConfiguration) []interface{} { - var m = make(map[string]interface{}) - if cfg == nil { - return nil - } - - if cfg.UserSettingsYaml != "" { - m["user_settings_yaml"] = cfg.UserSettingsYaml - } - - if cfg.UserSettingsOverrideYaml != "" { - 
m["user_settings_override_yaml"] = cfg.UserSettingsOverrideYaml - } - - if o := cfg.UserSettingsJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_json"] = string(b) - } - } - - if o := cfg.UserSettingsOverrideJSON; o != nil { - if b, _ := json.Marshal(o); len(b) > 0 && !bytes.Equal([]byte("{}"), b) { - m["user_settings_override_json"] = string(b) - } - } - - if cfg.DockerImage != "" { - m["docker_image"] = cfg.DockerImage - } - - if len(m) == 0 { - return nil - } - - return []interface{}{m} -} diff --git a/ec/ecresource/deploymentresource/observability.go b/ec/ecresource/deploymentresource/observability.go deleted file mode 100644 index 5442fc116..000000000 --- a/ec/ecresource/deploymentresource/observability.go +++ /dev/null @@ -1,117 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" -) - -// flattenObservability parses a deployment's observability settings. 
-func flattenObservability(settings *models.DeploymentSettings) []interface{} { - if settings == nil || settings.Observability == nil { - return nil - } - - var m = make(map[string]interface{}) - - // We are only accepting a single deployment ID and refID for both logs and metrics. - // If either of them is not nil the deployment ID and refID will be filled. - if settings.Observability.Metrics != nil { - m["deployment_id"] = settings.Observability.Metrics.Destination.DeploymentID - m["ref_id"] = settings.Observability.Metrics.Destination.RefID - m["metrics"] = true - } - - if settings.Observability.Logging != nil { - m["deployment_id"] = settings.Observability.Logging.Destination.DeploymentID - m["ref_id"] = settings.Observability.Logging.Destination.RefID - m["logs"] = true - } - - if len(m) == 0 { - return nil - } - - return []interface{}{m} -} - -func expandObservability(raw []interface{}, client *api.API) (*models.DeploymentObservabilitySettings, error) { - if len(raw) == 0 { - return nil, nil - } - - var req models.DeploymentObservabilitySettings - - for _, rawObs := range raw { - var obs = rawObs.(map[string]interface{}) - - depID, ok := obs["deployment_id"].(string) - if !ok { - return nil, nil - } - - refID, ok := obs["ref_id"].(string) - if depID == "self" { - // For self monitoring, the refID is not mandatory - if !ok { - refID = "" - } - } else if !ok || refID == "" { - // Since ms-77, the refID is optional. 
- // To not break ECE users with older versions, we still pre-calculate the refID here - params := deploymentapi.PopulateRefIDParams{ - Kind: util.Elasticsearch, - API: client, - DeploymentID: depID, - RefID: ec.String(""), - } - - if err := deploymentapi.PopulateRefID(params); err != nil { - return nil, fmt.Errorf("observability ref_id auto discovery: %w", err) - } - - refID = *params.RefID - } - - if logging, ok := obs["logs"].(bool); ok && logging { - req.Logging = &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: ec.String(depID), - RefID: refID, - }, - } - } - - if metrics, ok := obs["metrics"].(bool); ok && metrics { - req.Metrics = &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: ec.String(depID), - RefID: refID, - }, - } - } - } - - return &req, nil -} diff --git a/ec/ecdatasource/deploymentdatasource/flatteners_tags.go b/ec/ecresource/deploymentresource/observability/v1/observability.go similarity index 61% rename from ec/ecdatasource/deploymentdatasource/flatteners_tags.go rename to ec/ecresource/deploymentresource/observability/v1/observability.go index 32964d512..6075e336b 100644 --- a/ec/ecdatasource/deploymentdatasource/flatteners_tags.go +++ b/ec/ecresource/deploymentresource/observability/v1/observability.go @@ -15,24 +15,24 @@ // specific language governing permissions and limitations // under the License. -package deploymentdatasource +package v1 import ( - "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// flattenTags takes in Deployment Metadata resource models and returns its -// Tags in flattened form. 
-func flattenTags(metadata *models.DeploymentMetadata) map[string]interface{} { - if metadata == nil || metadata.Tags == nil { - return nil - } +type ObservabilityTF struct { + DeploymentId types.String `tfsdk:"deployment_id"` + RefId types.String `tfsdk:"ref_id"` + Logs types.Bool `tfsdk:"logs"` + Metrics types.Bool `tfsdk:"metrics"` +} - var result = make(map[string]interface{}) - for _, res := range metadata.Tags { - if res.Key != nil { - result[*res.Key] = *res.Value - } - } - return result +type Observability struct { + DeploymentId *string `tfsdk:"deployment_id"` + RefId *string `tfsdk:"ref_id"` + Logs bool `tfsdk:"logs"` + Metrics bool `tfsdk:"metrics"` } + +type Observabilities []Observability diff --git a/ec/ecresource/deploymentresource/observability/v1/schema.go b/ec/ecresource/deploymentresource/observability/v1/schema.go new file mode 100644 index 000000000..9196858fa --- /dev/null +++ b/ec/ecresource/deploymentresource/observability/v1/schema.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v1 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework-validators/listvalidator" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func ObservabilitySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional observability settings. Ship logs and metrics to a dedicated deployment.", + Optional: true, + Validators: []tfsdk.AttributeValidator{listvalidator.SizeAtMost(1)}, + Attributes: tfsdk.ListNestedAttributes(map[string]tfsdk.Attribute{ + "deployment_id": { + Type: types.StringType, + Required: true, + }, + "ref_id": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "logs": { + Type: types.BoolType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: true}), + }, + }, + "metrics": { + Type: types.BoolType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: true}), + }, + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/observability/v2/observability_payload.go b/ec/ecresource/deploymentresource/observability/v2/observability_payload.go new file mode 100644 index 000000000..1fbfd62c5 --- /dev/null +++ b/ec/ecresource/deploymentresource/observability/v2/observability_payload.go @@ -0,0 +1,93 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v1" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +type ObservabilityTF = v1.ObservabilityTF + +func ObservabilityPayload(ctx context.Context, obsObj types.Object, client *api.API) (*models.DeploymentObservabilitySettings, diag.Diagnostics) { + var observability *ObservabilityTF + + if diags := tfsdk.ValueAs(ctx, obsObj, &observability); diags.HasError() { + return nil, nil + } + + if observability == nil { + return nil, nil + } + + var payload models.DeploymentObservabilitySettings + + if observability.DeploymentId.Value == "" { + return nil, nil + } + + refID := observability.RefId.Value + + if observability.DeploymentId.Value != "self" && refID == "" { + // Since ms-77, the refID is optional. 
+ // To not break ECE users with older versions, we still pre-calculate the refID here + params := deploymentapi.PopulateRefIDParams{ + Kind: util.Elasticsearch, + API: client, + DeploymentID: observability.DeploymentId.Value, + RefID: ec.String(""), + } + + if err := deploymentapi.PopulateRefID(params); err != nil { + var diags diag.Diagnostics + diags.AddError("observability ref_id auto discovery", err.Error()) + return nil, diags + } + + refID = *params.RefID + } + + if observability.Logs.Value { + payload.Logging = &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: ec.String(observability.DeploymentId.Value), + RefID: refID, + }, + } + } + + if observability.Metrics.Value { + payload.Metrics = &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: ec.String(observability.DeploymentId.Value), + RefID: refID, + }, + } + } + + return &payload, nil +} diff --git a/ec/ecresource/deploymentresource/observability_test.go b/ec/ecresource/deploymentresource/observability/v2/observability_payload_test.go similarity index 59% rename from ec/ecresource/deploymentresource/observability_test.go rename to ec/ecresource/deploymentresource/observability/v2/observability_payload_test.go index 3fcf482a0..f4fd06fd7 100644 --- a/ec/ecresource/deploymentresource/observability_test.go +++ b/ec/ecresource/deploymentresource/observability/v2/observability_payload_test.go @@ -15,112 +15,25 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package v2 import ( + "context" "testing" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) -func TestFlattenObservability(t *testing.T) { - type args struct { - settings *models.DeploymentSettings - } - tests := []struct { - name string - args args - want []interface{} - }{ - { - name: "flattens no observability settings when empty", - args: args{}, - }, - { - name: "flattens no observability settings when empty", - args: args{settings: &models.DeploymentSettings{}}, - }, - { - name: "flattens no observability settings when empty", - args: args{settings: &models.DeploymentSettings{Observability: &models.DeploymentObservabilitySettings{}}}, - }, - { - name: "flattens observability settings", - args: args{settings: &models.DeploymentSettings{ - Observability: &models.DeploymentObservabilitySettings{ - Logging: &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }}, - want: []interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, - }}, - }, - { - name: "flattens observability settings", - args: args{settings: &models.DeploymentSettings{ - Observability: &models.DeploymentObservabilitySettings{ - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }}, - want: []interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "metrics": true, - 
}}, - }, - { - name: "flattens observability settings", - args: args{settings: &models.DeploymentSettings{ - Observability: &models.DeploymentObservabilitySettings{ - Logging: &models.DeploymentLoggingSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - Metrics: &models.DeploymentMetricsSettings{ - Destination: &models.ObservabilityAbsoluteDeployment{ - DeploymentID: &mock.ValidClusterID, - RefID: "main-elasticsearch", - }, - }, - }, - }}, - want: []interface{}{map[string]interface{}{ - "deployment_id": &mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, - "metrics": true, - }}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := flattenObservability(tt.args.settings) - assert.Equal(t, tt.want, got) - }) - } -} - -func TestExpandObservability(t *testing.T) { +func Test_observabilityPayload(t *testing.T) { type args struct { - v []interface{} + observability *Observability *api.API } tests := []struct { @@ -135,12 +48,12 @@ func TestExpandObservability(t *testing.T) { { name: "expands all observability settings with given refID", args: args{ - v: []interface{}{map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "metrics": true, - "logs": true, - }}, + observability: &Observability{ + DeploymentId: &mock.ValidClusterID, + RefId: ec.String("main-elasticsearch"), + Metrics: true, + Logs: true, + }, }, want: &models.DeploymentObservabilitySettings{ Logging: &models.DeploymentLoggingSettings{ @@ -157,18 +70,6 @@ func TestExpandObservability(t *testing.T) { }, }, }, - { - name: "handles explicit nils", - args: args{ - v: []interface{}{map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "metrics": nil, - "logs": nil, - }}, - }, - want: &models.DeploymentObservabilitySettings{}, - }, { name: "expands all observability settings", args: 
args{ @@ -186,11 +87,11 @@ func TestExpandObservability(t *testing.T) { }), ), ), - v: []interface{}{map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "metrics": true, - "logs": true, - }}, + observability: &Observability{ + DeploymentId: &mock.ValidClusterID, + Metrics: true, + Logs: true, + }, }, want: &models.DeploymentObservabilitySettings{ Logging: &models.DeploymentLoggingSettings{ @@ -224,11 +125,11 @@ func TestExpandObservability(t *testing.T) { }), ), ), - v: []interface{}{map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "metrics": false, - "logs": true, - }}, + observability: &Observability{ + DeploymentId: &mock.ValidClusterID, + Metrics: false, + Logs: true, + }, }, want: &models.DeploymentObservabilitySettings{ Logging: &models.DeploymentLoggingSettings{ @@ -256,11 +157,11 @@ func TestExpandObservability(t *testing.T) { }), ), ), - v: []interface{}{map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "metrics": true, - "logs": false, - }}, + observability: &Observability{ + DeploymentId: &mock.ValidClusterID, + Metrics: true, + Logs: false, + }, }, want: &models.DeploymentObservabilitySettings{ Metrics: &models.DeploymentMetricsSettings{ @@ -288,11 +189,11 @@ func TestExpandObservability(t *testing.T) { }), ), ), - v: []interface{}{map[string]interface{}{ - "deployment_id": "self", - "metrics": true, - "logs": false, - }}, + observability: &Observability{ + DeploymentId: ec.String("self"), + Metrics: true, + Logs: false, + }, }, want: &models.DeploymentObservabilitySettings{ Metrics: &models.DeploymentMetricsSettings{ @@ -320,12 +221,12 @@ func TestExpandObservability(t *testing.T) { }), ), ), - v: []interface{}{map[string]interface{}{ - "deployment_id": "self", - "ref_id": "main-elasticsearch", - "metrics": true, - "logs": false, - }}, + observability: &Observability{ + DeploymentId: ec.String("self"), + RefId: ec.String("main-elasticsearch"), + Metrics: true, + Logs: false, + }, }, want: 
&models.DeploymentObservabilitySettings{ Metrics: &models.DeploymentMetricsSettings{ @@ -339,7 +240,12 @@ func TestExpandObservability(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, _ := expandObservability(tt.args.v, tt.args.API) + var observability types.Object + diags := tfsdk.ValueFrom(context.Background(), tt.args.observability, ObservabilitySchema().FrameworkType(), &observability) + assert.Nil(t, diags) + + got, diags := ObservabilityPayload(context.Background(), observability, tt.args.API) + assert.Nil(t, diags) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/deploymentresource/observability/v2/observability_read.go b/ec/ecresource/deploymentresource/observability/v2/observability_read.go new file mode 100644 index 000000000..fa1c78cbb --- /dev/null +++ b/ec/ecresource/deploymentresource/observability/v2/observability_read.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package v2 + +import ( + "github.com/elastic/cloud-sdk-go/pkg/models" + v1 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/observability/v1" +) + +type Observability = v1.Observability + +func ReadObservability(in *models.DeploymentSettings) (*Observability, error) { + if in == nil || in.Observability == nil { + return nil, nil + } + + var obs Observability + + // We are only accepting a single deployment ID and refID for both logs and metrics. + // If either of them is not nil the deployment ID and refID will be filled. + if in.Observability.Metrics != nil { + if in.Observability.Metrics.Destination.DeploymentID != nil { + obs.DeploymentId = in.Observability.Metrics.Destination.DeploymentID + } + + obs.RefId = &in.Observability.Metrics.Destination.RefID + obs.Metrics = true + } + + if in.Observability.Logging != nil { + if in.Observability.Logging.Destination.DeploymentID != nil { + obs.DeploymentId = in.Observability.Logging.Destination.DeploymentID + } + obs.RefId = &in.Observability.Logging.Destination.RefID + obs.Logs = true + } + + if obs == (Observability{}) { + return nil, nil + } + + return &obs, nil +} diff --git a/ec/ecresource/deploymentresource/observability/v2/observability_read_test.go b/ec/ecresource/deploymentresource/observability/v2/observability_read_test.go new file mode 100644 index 000000000..e6f7aa4e0 --- /dev/null +++ b/ec/ecresource/deploymentresource/observability/v2/observability_read_test.go @@ -0,0 +1,127 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "context" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" +) + +func Test_readObservability(t *testing.T) { + type args struct { + settings *models.DeploymentSettings + } + tests := []struct { + name string + args args + want *Observability + }{ + { + name: "flattens no observability settings when empty", + args: args{}, + }, + { + name: "flattens no observability settings when empty", + args: args{settings: &models.DeploymentSettings{}}, + }, + { + name: "flattens no observability settings when empty", + args: args{settings: &models.DeploymentSettings{Observability: &models.DeploymentObservabilitySettings{}}}, + }, + { + name: "flattens observability settings", + args: args{settings: &models.DeploymentSettings{ + Observability: &models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }}, + want: &Observability{ + DeploymentId: &mock.ValidClusterID, + RefId: ec.String("main-elasticsearch"), + Logs: true, + }, + }, + { + name: "flattens observability settings", + args: args{settings: &models.DeploymentSettings{ + Observability: &models.DeploymentObservabilitySettings{ + Metrics: 
&models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }}, + want: &Observability{ + DeploymentId: &mock.ValidClusterID, + RefId: ec.String("main-elasticsearch"), + Metrics: true, + }, + }, + { + name: "flattens observability settings", + args: args{settings: &models.DeploymentSettings{ + Observability: &models.DeploymentObservabilitySettings{ + Logging: &models.DeploymentLoggingSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + Metrics: &models.DeploymentMetricsSettings{ + Destination: &models.ObservabilityAbsoluteDeployment{ + DeploymentID: &mock.ValidClusterID, + RefID: "main-elasticsearch", + }, + }, + }, + }}, + want: &Observability{ + DeploymentId: &mock.ValidClusterID, + RefId: ec.String("main-elasticsearch"), + Logs: true, + Metrics: true, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + observability, err := ReadObservability(tt.args.settings) + assert.Nil(t, err) + assert.Equal(t, tt.want, observability) + + var obj types.Object + diags := tfsdk.ValueFrom(context.Background(), observability, ObservabilitySchema().FrameworkType(), &obj) + assert.Nil(t, diags) + }) + } +} diff --git a/ec/ecresource/deploymentresource/observability/v2/schema.go b/ec/ecresource/deploymentresource/observability/v2/schema.go new file mode 100644 index 000000000..634e1ee5f --- /dev/null +++ b/ec/ecresource/deploymentresource/observability/v2/schema.go @@ -0,0 +1,62 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. 
licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package v2 + +import ( + "github.com/elastic/terraform-provider-ec/ec/internal/planmodifier" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" +) + +func ObservabilitySchema() tfsdk.Attribute { + return tfsdk.Attribute{ + Description: "Optional observability settings. Ship logs and metrics to a dedicated deployment.", + Optional: true, + Attributes: tfsdk.SingleNestedAttributes(map[string]tfsdk.Attribute{ + "deployment_id": { + Type: types.StringType, + Required: true, + }, + "ref_id": { + Type: types.StringType, + Computed: true, + Optional: true, + PlanModifiers: tfsdk.AttributePlanModifiers{ + resource.UseStateForUnknown(), + }, + }, + "logs": { + Type: types.BoolType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: true}), + }, + }, + "metrics": { + Type: types.BoolType, + Optional: true, + Computed: true, + PlanModifiers: []tfsdk.AttributePlanModifier{ + planmodifier.DefaultValue(types.Bool{Value: true}), + }, + }, + }), + } +} diff --git a/ec/ecresource/deploymentresource/read.go b/ec/ecresource/deploymentresource/read.go index 088645d88..5db891b63 100644 --- a/ec/ecresource/deploymentresource/read.go +++ b/ec/ecresource/deploymentresource/read.go @@ -20,25 +20,64 @@ package 
deploymentresource import ( "context" "errors" + "fmt" - "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/blang/semver" "github.com/elastic/cloud-sdk-go/pkg/api/apierror" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deputil" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/esremoteclustersapi" "github.com/elastic/cloud-sdk-go/pkg/client/deployments" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + apmv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/apm/v2" + deploymentv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + elasticsearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/elasticsearch/v2" + enterprisesearchv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/enterprisesearch/v2" + integrationsserverv2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/integrationsserver/v2" + kibanav2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/kibana/v2" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" ) -// Read queries the remote deployment state and updates the local state. -func readResource(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) +func (r *Resource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + if !r.ready(&response.Diagnostics) { + return + } + + var curState deploymentv2.DeploymentTF + + diags := request.State.Get(ctx, &curState) + + response.Diagnostics.Append(diags...) 
+ + if response.Diagnostics.HasError() { + return + } + + var newState *deploymentv2.Deployment + + // use state for the plan (there is no plan and config during Read) - otherwise we can get non-empty plan output + newState, diags = r.read(ctx, curState.Id.Value, &curState, curState, nil) + + response.Diagnostics.Append(diags...) + + if newState == nil { + response.State.RemoveResource(ctx) + } + + if newState != nil { + diags = response.State.Set(ctx, newState) + } + + response.Diagnostics.Append(diags...) +} + +func (r *Resource) read(ctx context.Context, id string, state *deploymentv2.DeploymentTF, plan deploymentv2.DeploymentTF, deploymentResources []*models.DeploymentResource) (*deploymentv2.Deployment, diag.Diagnostics) { + var diags diag.Diagnostics - res, err := deploymentapi.Get(deploymentapi.GetParams{ - API: client, DeploymentID: d.Id(), + response, err := deploymentapi.Get(deploymentapi.GetParams{ + API: r.client, DeploymentID: id, QueryParams: deputil.QueryParams{ ShowSettings: true, ShowPlans: true, @@ -48,37 +87,81 @@ func readResource(_ context.Context, d *schema.ResourceData, meta interface{}) d }) if err != nil { if deploymentNotFound(err) { - d.SetId("") - return nil + diags.AddError("Deployment not found", err.Error()) + return nil, diags } - return diag.FromErr(multierror.NewPrefixed("failed reading deployment", err)) + diags.AddError("Deployment get error", err.Error()) + return nil, diags } - if !hasRunningResources(res) { - d.SetId("") - return nil + if !HasRunningResources(response) { + return nil, nil + } + + if response.Resources == nil || len(response.Resources.Elasticsearch) == 0 { + diags.AddError("Get resource error", "cannot find Elasticsearch in response resources") + return nil, diags + } + + if response.Resources.Elasticsearch[0].Info.PlanInfo.Current != nil && response.Resources.Elasticsearch[0].Info.PlanInfo.Current.Plan != nil { + if err := 
checkVersion(response.Resources.Elasticsearch[0].Info.PlanInfo.Current.Plan.Elasticsearch.Version); err != nil { + diags.AddError("Get resource error", err.Error()) + return nil, diags + } + } + + refId := "" + + var elasticsearchPlan *elasticsearchv2.ElasticsearchTF + + if diags = tfsdk.ValueAs(ctx, plan.Elasticsearch, &elasticsearchPlan); diags.HasError() { + return nil, diags + } + + if elasticsearchPlan != nil { + refId = elasticsearchPlan.RefId.Value } - var diags diag.Diagnostics remotes, err := esremoteclustersapi.Get(esremoteclustersapi.GetParams{ - API: client, DeploymentID: d.Id(), - RefID: d.Get("elasticsearch.0.ref_id").(string), + API: r.client, DeploymentID: id, + RefID: refId, }) if err != nil { - diags = append(diags, diag.FromErr( - multierror.NewPrefixed("failed reading remote clusters", err), - )...) + diags.AddError("Remote clusters read error", err.Error()) + return nil, diags } - if remotes == nil { remotes = &models.RemoteResources{} } - if err := modelToState(d, res, *remotes); err != nil { - diags = append(diags, diag.FromErr(err)...) 
+ deployment, err := deploymentv2.ReadDeployment(response, remotes, deploymentResources) + if err != nil { + diags.AddError("Deployment read error", err.Error()) + return nil, diags + } + + deployment.RequestId = plan.RequestId.Value + + deployment.SetCredentialsIfEmpty(state) + + deployment.ProcessSelfInObservability() + + if diags := deployment.NullifyUnusedEsTopologies(ctx, elasticsearchPlan); diags.HasError() { + return nil, diags + } + + // ReadDeployment returns empty config struct if there is no config, so we have to nullify it if plan doesn't contain it + // we use state for plan in Read and there is no state during import so we need to check elasticsearchPlan against nil + if elasticsearchPlan != nil && + elasticsearchPlan.Config.IsNull() && + deployment.Elasticsearch != nil && + deployment.Elasticsearch.Config != nil && + deployment.Elasticsearch.Config.IsEmpty() { + + deployment.Elasticsearch.Config = nil } - return diags + return deployment, diags } func deploymentNotFound(err error) bool { @@ -92,3 +175,53 @@ func deploymentNotFound(err error) bool { // We also check for the case where a 403 is thrown for ESS. 
return apierror.IsRuntimeStatusCode(err, 403) } + +var minimumSupportedVersion = semver.MustParse("6.6.0") + +func checkVersion(version string) error { + v, err := semver.New(version) + + if err != nil { + return fmt.Errorf("unable to parse deployment version: %w", err) + } + + if v.LT(minimumSupportedVersion) { + return fmt.Errorf( + `invalid deployment version "%s": minimum supported version is "%s"`, + v.String(), minimumSupportedVersion.String(), + ) + } + + return nil +} + +func HasRunningResources(res *models.DeploymentGetResponse) bool { + if res.Resources != nil { + for _, r := range res.Resources.Elasticsearch { + if !elasticsearchv2.IsElasticsearchStopped(r) { + return true + } + } + for _, r := range res.Resources.Kibana { + if !kibanav2.IsKibanaStopped(r) { + return true + } + } + for _, r := range res.Resources.Apm { + if !apmv2.IsApmStopped(r) { + return true + } + } + for _, r := range res.Resources.EnterpriseSearch { + if !enterprisesearchv2.IsEnterpriseSearchStopped(r) { + return true + } + } + for _, r := range res.Resources.IntegrationsServer { + if !integrationsserverv2.IsIntegrationsServerStopped(r) { + return true + } + } + } + return false +} diff --git a/ec/ecresource/deploymentresource/read_test.go b/ec/ecresource/deploymentresource/read_test.go index afdf2f1b4..29e0de51c 100644 --- a/ec/ecresource/deploymentresource/read_test.go +++ b/ec/ecresource/deploymentresource/read_test.go @@ -15,142 +15,19 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package deploymentresource_test import ( - "context" "testing" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/apierror" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/client/deployments" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/go-openapi/runtime" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource" ) -func Test_readResource(t *testing.T) { - tc500Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC500 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - tc404Err := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - wantTC404 := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC404.SetId("") - - tc200Stopped := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - - wantTC200Stopped := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - State: newSampleLegacyDeployment(), - Schema: newSchema(), - }) - wantTC200Stopped.SetId("") - - type args struct { - ctx context.Context - d *schema.ResourceData - meta interface{} - } - tests := []struct { - name string - args args - want diag.Diagnostics - wantRD *schema.ResourceData - }{ - { - name: "returns an error when it receives a 500", - 
args: args{ - d: tc500Err, - meta: api.NewMock(mock.NewErrorResponse(500, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: diag.Diagnostics{ - { - Severity: diag.Error, - Summary: "failed reading deployment: 1 error occurred:\n\t* api error: some: message\n\n", - }, - }, - wantRD: wantTC500, - }, - { - name: "returns nil and unsets the state when the error is known", - args: args{ - d: tc404Err, - meta: api.NewMock(mock.NewErrorResponse(404, mock.APIError{ - Code: "some", Message: "message", - })), - }, - want: nil, - wantRD: wantTC404, - }, - { - name: "returns nil and unsets the state when none of the deployment resources are running", - args: args{ - d: tc200Stopped, - meta: api.NewMock(mock.New200StructResponse(models.DeploymentGetResponse{ - Resources: &models.DeploymentResources{ - Elasticsearch: []*models.ElasticsearchResourceInfo{{ - Info: &models.ElasticsearchClusterInfo{Status: ec.String("stopped")}, - }}, - }, - })), - }, - want: nil, - wantRD: wantTC200Stopped, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := readResource(tt.args.ctx, tt.args.d, tt.args.meta) - assert.Equal(t, tt.want, got) - var want interface{} - if tt.wantRD != nil { - if s := tt.wantRD.State(); s != nil { - want = s.Attributes - } - } - - var gotState interface{} - if s := tt.args.d.State(); s != nil { - gotState = s.Attributes - } - - assert.Equal(t, want, gotState) - }) - } -} - -func Test_deploymentNotFound(t *testing.T) { +func Test_hasRunningResources(t *testing.T) { type args struct { - err error + res *models.DeploymentGetResponse } tests := []struct { name string @@ -158,39 +35,64 @@ func Test_deploymentNotFound(t *testing.T) { want bool }{ { - name: "When the error is empty, it returns false", - }, - { - name: "When the error is something else (500), it returns false", - args: args{ - err: &apierror.Error{Err: &runtime.APIError{Code: 500}}, - }, - }, - { - name: "When the error is something else (401), it returns 
false", - args: args{ - err: &apierror.Error{Err: &deployments.GetDeploymentUnauthorized{}}, - }, + name: "has all the resources stopped", + args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + {Info: &models.ElasticsearchClusterInfo{Status: ec.String("stopped")}}, + }, + Kibana: []*models.KibanaResourceInfo{ + {Info: &models.KibanaClusterInfo{Status: ec.String("stopped")}}, + }, + Apm: []*models.ApmResourceInfo{ + {Info: &models.ApmInfo{Status: ec.String("stopped")}}, + }, + EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ + {Info: &models.EnterpriseSearchInfo{Status: ec.String("stopped")}}, + }, + }}}, + want: false, }, { - name: "When the deployment is not found, it returns true", - args: args{ - err: &apierror.Error{Err: &deployments.GetDeploymentNotFound{}}, - }, + name: "has some resources stopped", + args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + {Info: &models.ElasticsearchClusterInfo{Status: ec.String("running")}}, + }, + Kibana: []*models.KibanaResourceInfo{ + {Info: &models.KibanaClusterInfo{Status: ec.String("stopped")}}, + }, + Apm: []*models.ApmResourceInfo{ + {Info: &models.ApmInfo{Status: ec.String("running")}}, + }, + EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ + {Info: &models.EnterpriseSearchInfo{Status: ec.String("running")}}, + }, + }}}, want: true, }, { - name: "When the deployment is not authorized it returns true, to account for the DR case (ESS)", - args: args{ - err: &apierror.Error{Err: &runtime.APIError{Code: 403}}, - }, + name: "has all resources running", + args: args{res: &models.DeploymentGetResponse{Resources: &models.DeploymentResources{ + Elasticsearch: []*models.ElasticsearchResourceInfo{ + {Info: &models.ElasticsearchClusterInfo{Status: ec.String("running")}}, + }, + Kibana: []*models.KibanaResourceInfo{ + {Info: 
&models.KibanaClusterInfo{Status: ec.String("running")}}, + }, + Apm: []*models.ApmResourceInfo{ + {Info: &models.ApmInfo{Status: ec.String("running")}}, + }, + EnterpriseSearch: []*models.EnterpriseSearchResourceInfo{ + {Info: &models.EnterpriseSearchInfo{Status: ec.String("running")}}, + }, + }}}, want: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := deploymentNotFound(tt.args.err); got != tt.want { - t.Errorf("deploymentNotFound() = %v, want %v", got, tt.want) + if got := deploymentresource.HasRunningResources(tt.args.res); got != tt.want { + t.Errorf("hasRunningResources() = %v, want %v", got, tt.want) } }) } diff --git a/ec/ecresource/deploymentresource/resource.go b/ec/ecresource/deploymentresource/resource.go index 380ae7217..4624ae9e3 100644 --- a/ec/ecresource/deploymentresource/resource.go +++ b/ec/ecresource/deploymentresource/resource.go @@ -18,39 +18,51 @@ package deploymentresource import ( - "time" + "context" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + "github.com/elastic/terraform-provider-ec/ec/internal" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" ) -// Resource returns the ec_deployment resource schema. 
-func Resource() *schema.Resource { - return &schema.Resource{ - CreateContext: createResource, - ReadContext: readResource, - UpdateContext: updateResource, - DeleteContext: deleteResource, - - Schema: newSchema(), - - Description: "Elastic Cloud Deployment resource", - Importer: &schema.ResourceImporter{ - StateContext: importFunc, - }, - - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(40 * time.Minute), - Update: schema.DefaultTimeout(60 * time.Minute), - Delete: schema.DefaultTimeout(60 * time.Minute), - }, - - SchemaVersion: 1, - StateUpgraders: []schema.StateUpgrader{ - { - Type: resourceSchemaV0().CoreConfigSchema().ImpliedType(), - Upgrade: resourceStateUpgradeV0, - Version: 0, - }, - }, +// Ensure provider defined types fully satisfy framework interfaces +// var _ tpfprovider.ResourceType = DeploymentResourceType{} +var _ resource.ResourceWithImportState = &Resource{} + +type Resource struct { + client *api.API +} + +func (r *Resource) ready(dg *diag.Diagnostics) bool { + if r.client == nil { + dg.AddError( + "Unconfigured API Client", + "Expected configured API client. Please report this issue to the provider developers.", + ) + + return false } + return true +} + +func (r *Resource) Configure(ctx context.Context, request resource.ConfigureRequest, response *resource.ConfigureResponse) { + client, diags := internal.ConvertProviderData(request.ProviderData) + response.Diagnostics.Append(diags...) 
+ r.client = client +} + +func (t *Resource) GetSchema(ctx context.Context) (tfsdk.Schema, diag.Diagnostics) { + return v2.DeploymentSchema(), nil +} + +func (r *Resource) Metadata(ctx context.Context, request resource.MetadataRequest, response *resource.MetadataResponse) { + response.TypeName = request.ProviderTypeName + "_deployment" +} + +func (r *Resource) ImportState(ctx context.Context, req resource.ImportStateRequest, resp *resource.ImportStateResponse) { + resource.ImportStatePassthroughID(ctx, path.Root("id"), req, resp) } diff --git a/ec/ecresource/deploymentresource/schema.go b/ec/ecresource/deploymentresource/schema.go deleted file mode 100644 index 770574618..000000000 --- a/ec/ecresource/deploymentresource/schema.go +++ /dev/null @@ -1,211 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -const ( - minimumKibanaSize = 1024 - minimumApmSize = 512 - minimumEnterpriseSearchSize = 2048 - minimumIntegrationsServerSize = 1024 - - minimumZoneCount = 1 -) - -// newSchema returns the schema for an "ec_deployment" resource. 
-func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "alias": { - Type: schema.TypeString, - Description: "Optional deployment alias that affects the format of the resource URLs", - Optional: true, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Description: "Required Elastic Stack version to use for all of the deployment resources", - Required: true, - }, - "region": { - Type: schema.TypeString, - Description: `Required ESS region where to create the deployment, for ECE environments "ece-region" must be set`, - Required: true, - ForceNew: true, - }, - "deployment_template_id": { - Type: schema.TypeString, - Description: "Required Deployment Template identifier to create the deployment from", - Required: true, - }, - "name": { - Type: schema.TypeString, - Description: "Optional name for the deployment", - Optional: true, - }, - "request_id": { - Type: schema.TypeString, - Description: "Optional request_id to set on the create operation, only use when previous create attempts return with an error and a request_id is returned as part of the error", - Optional: true, - }, - - // Computed ES Creds - "elasticsearch_username": { - Type: schema.TypeString, - Description: "Computed username obtained upon creating the Elasticsearch resource", - Computed: true, - }, - "elasticsearch_password": { - Type: schema.TypeString, - Description: "Computed password obtained upon creating the Elasticsearch resource", - Computed: true, - Sensitive: true, - }, - - // APM secret_token - "apm_secret_token": { - Type: schema.TypeString, - Computed: true, - Sensitive: true, - }, - - // Resources - "elasticsearch": { - Type: schema.TypeList, - Description: "Required Elasticsearch resource definition", - MaxItems: 1, - Required: true, - Elem: newElasticsearchResource(), - }, - "kibana": { - Type: schema.TypeList, - Description: "Optional Kibana resource definition", - Optional: true, - MaxItems: 1, - Elem: newKibanaResource(), - }, - "apm": { - 
Type: schema.TypeList, - Description: "Optional APM resource definition", - Optional: true, - MaxItems: 1, - Elem: newApmResource(), - }, - "integrations_server": { - Type: schema.TypeList, - Description: "Optional Integrations Server resource definition", - Optional: true, - MaxItems: 1, - Elem: newIntegrationsServerResource(), - }, - "enterprise_search": { - Type: schema.TypeList, - Description: "Optional Enterprise Search resource definition", - Optional: true, - MaxItems: 1, - Elem: newEnterpriseSearchResource(), - }, - - // Settings - "traffic_filter": { - Description: "Optional list of traffic filters to apply to this deployment.", - // This field is a TypeSet since the order of the items isn't - // important, but the unique list is. This prevents infinite loops - // for autogenerated IDs. - Type: schema.TypeSet, - Set: schema.HashString, - Optional: true, - MinItems: 1, - Elem: &schema.Schema{ - MinItems: 1, - Type: schema.TypeString, - }, - }, - "observability": { - Type: schema.TypeList, - Description: "Optional observability settings. Ship logs and metrics to a dedicated deployment.", - Optional: true, - MaxItems: 1, - Elem: newObservabilitySettings(), - }, - - "tags": { - Description: "Optional map of deployment tags", - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - } -} - -func newObservabilitySettings() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployment_id": { - Type: schema.TypeString, - Required: true, - DiffSuppressFunc: func(k, oldValue, newValue string, d *schema.ResourceData) bool { - // The terraform config can contain 'self' as a deployment target - // However the API will return the actual deployment-id. - // This overrides 'self' with the deployment-id so the diff will work correctly. 
- var deploymentID = d.Id() - var mappedOldValue = mapSelfToDeploymentID(oldValue, deploymentID) - var mappedNewValue = mapSelfToDeploymentID(newValue, deploymentID) - - return mappedOldValue == mappedNewValue - }, - }, - "ref_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "logs": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "metrics": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - } -} - -func mapSelfToDeploymentID(value string, deploymentID string) string { - if value == "self" && deploymentID != "" { - // If the deployment has a deployment-id, replace 'self' with the deployment-id - return deploymentID - } - - return value -} - -// suppressMissingOptionalConfigurationBlock handles configuration block attributes in the following scenario: -// - The resource schema includes an optional configuration block with defaults -// - The API response includes those defaults to refresh into the Terraform state -// - The operator's configuration omits the optional configuration block -func suppressMissingOptionalConfigurationBlock(k, old, new string, d *schema.ResourceData) bool { - return old == "1" && new == "0" -} diff --git a/ec/ecresource/deploymentresource/schema_apm.go b/ec/ecresource/deploymentresource/schema_apm.go deleted file mode 100644 index db4460075..000000000 --- a/ec/ecresource/deploymentresource/schema_apm.go +++ /dev/null @@ -1,142 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func newApmResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-apm", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": apmTopologySchema(), - - "config": apmConfig(), - - // TODO: Implement settings field. 
- // "settings": interface{} - }, - } -} - -func apmTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - }, - }, - } -} - -func apmConfig() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the Apm configuration options for the APM Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "docker_image": { - Type: schema.TypeString, - Description: "Optionally override the docker image the APM nodes will use. Note that this field will only work for internal users only.", - Optional: true, - }, - // APM System Settings - "debug_enabled": { - Type: schema.TypeBool, - Description: `Optionally enable debug mode for APM servers - defaults to false`, - Optional: true, - Default: false, - }, - - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - } -} diff --git a/ec/ecresource/deploymentresource/schema_elasticsearch.go b/ec/ecresource/deploymentresource/schema_elasticsearch.go deleted file mode 100644 index 5534d1ded..000000000 --- a/ec/ecresource/deploymentresource/schema_elasticsearch.go +++ /dev/null @@ -1,544 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "bytes" - "fmt" - "strconv" - "strings" - - "github.com/elastic/cloud-sdk-go/pkg/util/slice" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func newElasticsearchResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "autoscale": { - Type: schema.TypeString, - Description: `Enable or disable autoscaling. Defaults to the setting coming from the deployment template. 
Accepted values are "true" or "false".`, - Computed: true, - Optional: true, - ValidateFunc: func(i interface{}, s string) ([]string, []error) { - if _, err := strconv.ParseBool(i.(string)); err != nil { - return nil, []error{ - fmt.Errorf("failed parsing autoscale value: %w", err), - } - } - return nil, nil - }, - }, - - "ref_id": { - Type: schema.TypeString, - Description: "Optional ref_id to set on the Elasticsearch resource", - Default: "main-elasticsearch", - Optional: true, - }, - - // Computed attributes - "resource_id": { - Type: schema.TypeString, - Description: "The Elasticsearch resource unique identifier", - Computed: true, - }, - "region": { - Type: schema.TypeString, - Description: "The Elasticsearch resource region", - Computed: true, - }, - "cloud_id": { - Type: schema.TypeString, - Description: "The encoded Elasticsearch credentials to use in Beats or Logstash", - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Description: "The Elasticsearch resource HTTP endpoint", - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Description: "The Elasticsearch resource HTTPs endpoint", - Computed: true, - }, - - // Sub-objects - "topology": elasticsearchTopologySchema(), - - "config": elasticsearchConfig(), - - "remote_cluster": elasticsearchRemoteCluster(), - - "snapshot_source": newSnapshotSourceSettings(), - - "extension": newExtensionSchema(), - - "trust_account": newTrustAccountSchema(), - "trust_external": newTrustExternalSchema(), - - "strategy": newStrategySchema(), - }, - } -} - -func elasticsearchTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - MinItems: 1, - Optional: true, - Computed: true, - Description: `Optional topology element which must be set once but can be set multiple times to compose complex topologies`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "id": { - Type: schema.TypeString, - Description: `Required topology ID from the 
deployment template`, - Required: true, - }, - "instance_configuration_id": { - Type: schema.TypeString, - Description: `Computed Instance Configuration ID of the topology element`, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Description: `Optional amount of memory per node in the "g" notation`, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Description: `Optional number of zones that the Elasticsearch cluster will span. This is used to set HA`, - Computed: true, - Optional: true, - }, - "node_type_data": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (data node)`, - Computed: true, - Optional: true, - }, - "node_type_master": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (master node)`, - Computed: true, - Optional: true, - }, - "node_type_ingest": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (ingest node)`, - Computed: true, - Optional: true, - }, - "node_type_ml": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (machine learning node)`, - Computed: true, - Optional: true, - }, - "node_roles": { - Type: schema.TypeSet, - Set: schema.HashString, - Description: `The computed list of node roles for the current topology element`, - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - "autoscaling": { - Type: schema.TypeList, - Description: "Optional Elasticsearch autoscaling settings, such a maximum and minimum size and resources.", - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "max_size_resource": { - Description: "Maximum resource type for the maximum 
autoscaling setting.", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "max_size": { - Description: "Maximum size value for the maximum autoscaling setting.", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "min_size_resource": { - Description: "Minimum resource type for the minimum autoscaling setting.", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "min_size": { - Description: "Minimum size value for the minimum autoscaling setting.", - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - - "policy_override_json": { - Type: schema.TypeString, - Description: "Computed policy overrides set directly via the API or other clients.", - Computed: true, - }, - }, - }, - }, - - // Read only config block that is present in the provider to - // avoid unsetting already set 'topology.elasticsearch' in the - // deployment plan. - "config": { - Type: schema.TypeList, - Computed: true, - Description: `Computed read-only configuration to avoid unsetting plan settings from 'topology.elasticsearch'`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Settings - - // plugins maps to the `enabled_built_in_plugins` API setting. - "plugins": { - Type: schema.TypeSet, - Set: schema.HashString, - Description: "List of Elasticsearch supported plugins, which vary from version to version. Check the Stack Pack version to see which plugins are supported for each version. 
This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", - Computed: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - - // User settings - "user_settings_json": { - Type: schema.TypeString, - Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, - Computed: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Computed: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, - Computed: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Computed: true, - }, - }, - }, - }, - }, - }, - } -} - -func elasticsearchConfig() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optional Elasticsearch settings which will be applied to all topologies unless overridden on the topology element`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Settings - - "docker_image": { - Type: schema.TypeString, - Description: "Optionally override the docker image the Elasticsearch nodes will use. Note that this field will only work for internal users only.", - Optional: true, - }, - - // Ignored settings are: [ user_bundles and user_plugins ]. - // Adding support for them will allow users to specify - // "Extensions" as it is possible in the UI today. - // The implementation would differ between ECE and ESS. - - // plugins maps to the `enabled_built_in_plugins` API setting. 
- "plugins": { - Type: schema.TypeSet, - Set: schema.HashString, - Description: "List of Elasticsearch supported plugins, which vary from version to version. Check the Stack Pack version to see which plugins are supported for each version. This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", - Optional: true, - Elem: &schema.Schema{ - MinItems: 1, - Type: schema.TypeString, - }, - }, - - // User settings - "user_settings_json": { - Type: schema.TypeString, - Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - }, - }, - } -} - -func elasticsearchRemoteCluster() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Optional: true, - Description: "Optional Elasticsearch remote clusters to configure for the Elasticsearch resource, can be set multiple times", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployment_id": { - Description: "Remote deployment ID", - Type: schema.TypeString, - ValidateFunc: validation.StringLenBetween(32, 32), - Required: true, - }, - "alias": { - Description: "Alias for this Cross Cluster Search binding", - Type: schema.TypeString, - ValidateFunc: validation.StringIsNotEmpty, - Required: true, - }, - "ref_id": { - Description: `Remote elasticsearch "ref_id", it is best left to the default value`, - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - 
"skip_unavailable": { - Description: "If true, skip the cluster during search when disconnected", - Type: schema.TypeBool, - Default: false, - Optional: true, - }, - }, - }, - } -} - -func newSnapshotSourceSettings() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Description: "Optional snapshot source settings. Restore data from a snapshot of another deployment.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source_elasticsearch_cluster_id": { - Description: "ID of the Elasticsearch cluster that will be used as the source of the snapshot", - Type: schema.TypeString, - Required: true, - }, - "snapshot_name": { - Description: "Name of the snapshot to restore. Use '__latest_success__' to get the most recent successful snapshot.", - Type: schema.TypeString, - Default: "__latest_success__", - Optional: true, - }, - }, - }, - } -} - -func newExtensionSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Set: esExtensionHash, - Description: "Optional Elasticsearch extensions such as custom bundles or plugins.", - Optional: true, - MinItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "name": { - Description: "Extension name.", - Type: schema.TypeString, - Required: true, - }, - "type": { - Description: "Extension type, only `bundle` or `plugin` are supported.", - Type: schema.TypeString, - Required: true, - ValidateFunc: func(val interface{}, _ string) ([]string, []error) { - t := val.(string) - if !slice.HasString([]string{"bundle", "plugin"}, t) { - return nil, []error{fmt.Errorf( - "invalid extension type %s: accepted values are bundle or plugin", - t, - )} - } - return nil, nil - }, - }, - "version": { - Description: "Elasticsearch compatibility version. 
Bundles should specify major or minor versions with wildcards, such as `7.*` or `*` but **plugins must use full version notation down to the patch level**, such as `7.10.1` and wildcards are not allowed.", - Type: schema.TypeString, - Required: true, - }, - "url": { - Description: "Bundle or plugin URL, the extension URL can be obtained from the `ec_deployment_extension..url` attribute or the API and cannot be a random HTTP address that is hosted elsewhere.", - Type: schema.TypeString, - Required: true, - }, - }, - }, - } -} - -func esExtensionHash(v interface{}) int { - var buf bytes.Buffer - m := v.(map[string]interface{}) - buf.WriteString(m["type"].(string)) - buf.WriteString(m["version"].(string)) - buf.WriteString(m["url"].(string)) - buf.WriteString(m["name"].(string)) - return schema.HashString(buf.String()) -} - -func newTrustAccountSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Description: "Optional Elasticsearch account trust settings.", - Optional: true, - Computed: true, - Elem: accountResource(), - } -} - -func accountResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "account_id": { - Description: "The ID of the Account.", - Type: schema.TypeString, - Required: true, - }, - "trust_all": { - Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", - Type: schema.TypeBool, - Required: true, - }, - "trust_allowlist": { - Description: "The list of clusters to trust. 
Only used when `trust_all` is false.", - Type: schema.TypeSet, - Set: schema.HashString, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func newTrustExternalSchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeSet, - Description: "Optional Elasticsearch external trust settings.", - Optional: true, - Computed: true, - Elem: externalResource(), - } -} - -func externalResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "relationship_id": { - Description: "The ID of the external trust relationship.", - Type: schema.TypeString, - Required: true, - }, - "trust_all": { - Description: "If true, all clusters in this account will by default be trusted and the `trust_allowlist` is ignored.", - Type: schema.TypeBool, - Required: true, - }, - "trust_allowlist": { - Description: "The list of clusters to trust. Only used when `trust_all` is false.", - Type: schema.TypeSet, - Set: schema.HashString, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }, - } -} - -func newStrategySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Description: "Configuration strategy settings.", - Optional: true, - MaxItems: 1, - Elem: strategyResource(), - } -} - -func strategyResource() *schema.Resource { - validValues := strings.Join(strategiesList, ", ") - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "type": { - Description: "Configuration strategy type " + validValues, - Type: schema.TypeString, - Required: true, - ValidateFunc: func(val interface{}, key string) (warns []string, errs []error) { - t := val.(string) - fmt.Printf("Validating %s in %v", t, validValues) - if !slice.HasString(strategiesList, t) { - errs = append(errs, fmt.Errorf(`invalid %s '%s': valid strategies are %v`, key, t, validValues)) - } - return - }, - }, - }, - } -} diff --git a/ec/ecresource/deploymentresource/schema_enteprise_search.go 
b/ec/ecresource/deploymentresource/schema_enteprise_search.go deleted file mode 100644 index 0863ac770..000000000 --- a/ec/ecresource/deploymentresource/schema_enteprise_search.go +++ /dev/null @@ -1,149 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func newEnterpriseSearchResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-enterprise_search", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": enterpriseSearchTopologySchema(), - - "config": enterpriseSearchConfig(), - - // TODO: Implement settings field. 
- // "settings": interface{} - }, - } -} - -func enterpriseSearchTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - - // Node types - - "node_type_appserver": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_connector": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_worker": { - Type: schema.TypeBool, - Computed: true, - }, - }, - }, - } -} - -func enterpriseSearchConfig() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the Enterprise Search configuration options for the Enterprise Search Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "docker_image": { - Type: schema.TypeString, - Description: "Optionally override the docker image the Enterprise Search nodes will use. Note that this field will only work for internal users only.", - Optional: true, - }, - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - } -} diff --git a/ec/ecresource/deploymentresource/schema_integrations_server.go b/ec/ecresource/deploymentresource/schema_integrations_server.go deleted file mode 100644 index 70cadfe82..000000000 --- a/ec/ecresource/deploymentresource/schema_integrations_server.go +++ /dev/null @@ -1,147 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. 
licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func newIntegrationsServerResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-integrations_server", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "fleet_https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "apm_https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": integrationsServerTopologySchema(), - - "config": integrationsServerConfig(), - }, - } -} - -func integrationsServerTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - 
Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - }, - }, - } -} - -func integrationsServerConfig() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the IntegrationsServer configuration options for the IntegrationsServer Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "docker_image": { - Type: schema.TypeString, - Description: "Optionally override the docker image the IntegrationsServer nodes will use. Note that this field will only work for internal users only.", - Optional: true, - }, - // IntegrationsServer System Settings - "debug_enabled": { - Type: schema.TypeBool, - Description: `Optionally enable debug mode for IntegrationsServer servers - defaults to false`, - Optional: true, - Default: false, - }, - - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - } -} diff --git a/ec/ecresource/deploymentresource/schema_kibana.go b/ec/ecresource/deploymentresource/schema_kibana.go deleted file mode 100644 index ab6541f0f..000000000 --- a/ec/ecresource/deploymentresource/schema_kibana.go +++ /dev/null @@ -1,131 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. 
See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -func newKibanaResource() *schema.Resource { - return &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-kibana", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": kibanaTopologySchema(), - - "config": kibanaConfig(), - }, - } -} - -func kibanaTopologySchema() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - Computed: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - }, - }, - } -} - -func kibanaConfig() *schema.Schema { - return &schema.Schema{ - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the Kibana configuration options for the Kibana Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "docker_image": { - Type: schema.TypeString, - Description: "Optionally override the docker image the Kibana nodes will 
use. Note that this field will only work for internal users only.", - Optional: true, - }, - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). 
(These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - } -} diff --git a/ec/ecresource/deploymentresource/schema_v0.go b/ec/ecresource/deploymentresource/schema_v0.go deleted file mode 100644 index 0ecbb9080..000000000 --- a/ec/ecresource/deploymentresource/schema_v0.go +++ /dev/null @@ -1,704 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "context" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" -) - -func resourceStateUpgradeV0(_ context.Context, raw map[string]interface{}, m interface{}) (map[string]interface{}, error) { - for _, apm := range raw["apm"].([]interface{}) { - rawApm := apm.(map[string]interface{}) - delete(rawApm, "version") - } - - for _, es := range raw["elasticsearch"].([]interface{}) { - rawEs := es.(map[string]interface{}) - delete(rawEs, "version") - } - - for _, ess := range raw["enterprise_search"].([]interface{}) { - rawEss := ess.(map[string]interface{}) - delete(rawEss, "version") - } - - for _, kibana := range raw["kibana"].([]interface{}) { - rawKibana := kibana.(map[string]interface{}) - delete(rawKibana, "version") - } - - return raw, nil -} - -// Copy of the revision 0 of the deployment schema. -func resourceSchemaV0() *schema.Resource { - return &schema.Resource{Schema: map[string]*schema.Schema{ - "version": { - Type: schema.TypeString, - Description: "Required Elastic Stack version to use for all of the deployment resources", - Required: true, - }, - "region": { - Type: schema.TypeString, - Description: `Required ESS region where to create the deployment, for ECE environments "ece-region" must be set`, - Required: true, - ForceNew: true, - }, - "deployment_template_id": { - Type: schema.TypeString, - Description: "Required Deployment Template identifier to create the deployment from", - Required: true, - }, - "name": { - Type: schema.TypeString, - Description: "Optional name for the deployment", - Optional: true, - }, - "request_id": { - Type: schema.TypeString, - Description: "Optional request_id to set on the create operation, only use when previous create attempts return with an error and a request_id is returned as part of the error", - Optional: true, - }, - - // Computed ES Creds - "elasticsearch_username": { - Type: 
schema.TypeString, - Description: "Computed username obtained upon creating the Elasticsearch resource", - Computed: true, - }, - "elasticsearch_password": { - Type: schema.TypeString, - Description: "Computed password obtained upon creating the Elasticsearch resource", - Computed: true, - Sensitive: true, - }, - - // APM secret_token - "apm_secret_token": { - Type: schema.TypeString, - Computed: true, - Sensitive: true, - }, - - // Resources - "elasticsearch": { - Type: schema.TypeList, - Description: "Required Elasticsearch resource definition", - MaxItems: 1, - Required: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "ref_id": { - Type: schema.TypeString, - Description: "Optional ref_id to set on the Elasticsearch resource", - Default: "main-elasticsearch", - Optional: true, - }, - - // Computed attributes - "resource_id": { - Type: schema.TypeString, - Description: "The Elasticsearch resource unique identifier", - Computed: true, - }, - "version": { - Type: schema.TypeString, - Description: "The Elasticsearch resource current version", - Computed: true, - }, - "region": { - Type: schema.TypeString, - Description: "The Elasticsearch resource region", - Computed: true, - }, - "cloud_id": { - Type: schema.TypeString, - Description: "The encoded Elasticsearch credentials to use in Beats or Logstash", - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Description: "The Elasticsearch resource HTTP endpoint", - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Description: "The Elasticsearch resource HTTPs endpoint", - Computed: true, - }, - - // Sub-objects - "topology": { - Type: schema.TypeList, - MinItems: 1, - Optional: true, - Computed: true, - Description: `Optional topology element which must be set once but can be set multiple times to compose complex topologies`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - 
Description: `Computed Instance Configuration ID of the topology element`, - Computed: true, - Optional: true, - }, - "size": { - Type: schema.TypeString, - Description: `Optional amount of memory per node in the "g" notation`, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Description: `Optional number of zones that the Elasticsearch cluster will span. This is used to set HA`, - Computed: true, - Optional: true, - }, - "node_type_data": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (data node)`, - Computed: true, - Optional: true, - }, - "node_type_master": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (master node)`, - Computed: true, - Optional: true, - }, - "node_type_ingest": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (ingest node)`, - Computed: true, - Optional: true, - }, - "node_type_ml": { - Type: schema.TypeString, - Description: `The node type for the Elasticsearch Topology element (machine learning node)`, - Computed: true, - Optional: true, - }, - }, - }, - }, - - "config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optional Elasticsearch settings which will be applied to all topologies unless overridden on the topology element`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // Settings - - // Ignored settings are: [ user_bundles and user_plugins ]. - // Adding support for them will allow users to specify - // "Extensions" as it is possible in the UI today. - // The implementation would differ between ECE and ESS. - - // plugins maps to the `enabled_built_in_plugins` API setting. 
- "plugins": { - Type: schema.TypeSet, - Set: schema.HashString, - Description: "List of Elasticsearch supported plugins, which vary from version to version. Check the Stack Pack version to see which plugins are supported for each version. This is currently only available from the UI and [ecctl](https://www.elastic.co/guide/en/ecctl/master/ecctl_stack_list.html)", - Optional: true, - MinItems: 1, - Elem: &schema.Schema{ - MinItems: 1, - Type: schema.TypeString, - }, - }, - - // User settings - "user_settings_json": { - Type: schema.TypeString, - Description: `JSON-formatted user level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `JSON-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted user level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `YAML-formatted admin (ECE) level "elasticsearch.yml" setting overrides`, - Optional: true, - }, - }, - }, - }, - - "remote_cluster": { - Type: schema.TypeList, - Optional: true, - MinItems: 1, - Description: "Optional Elasticsearch remote clusters to configure for the Elasticsearch resource, can be set multiple times", - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployment_id": { - Description: "Remote deployment ID", - Type: schema.TypeString, - ValidateFunc: validation.StringLenBetween(32, 32), - Required: true, - }, - "alias": { - Description: "Alias for this Cross Cluster Search binding", - Type: schema.TypeString, - ValidateFunc: validation.StringIsNotEmpty, - Optional: true, - }, - "ref_id": { - Description: `Remote elasticsearch "ref_id", it is best left to the default value`, - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "skip_unavailable": { - 
Description: "If true, skip the cluster during search when disconnected", - Type: schema.TypeBool, - Default: false, - Optional: true, - }, - }, - }, - }, - - "snapshot_source": { - Type: schema.TypeList, - Description: "Optional snapshot source settings. Restore data from a snapshot of another deployment.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "source_elasticsearch_cluster_id": { - Description: "ID of the Elasticsearch cluster that will be used as the source of the snapshot", - Type: schema.TypeString, - Required: true, - }, - "snapshot_name": { - Description: "Name of the snapshot to restore. Use '__latest_success__' to get the most recent successful snapshot.", - Type: schema.TypeString, - Default: "__latest_success__", - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "kibana": { - Type: schema.TypeList, - Description: "Optional Kibana resource definition", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-kibana", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults 
to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - }, - }, - }, - - "config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the Kibana configuration options for the Kibana Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "apm": { - Type: schema.TypeList, - Description: "Optional APM resource definition", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-apm", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - }, - }, - }, - - "config": { - Type: schema.TypeList, - Optional: 
true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the Apm configuration options for the APM Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - // APM System Settings - "debug_enabled": { - Type: schema.TypeBool, - Description: `Optionally enable debug mode for APM servers - defaults to false`, - Optional: true, - Default: false, - }, - - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - "enterprise_search": { - Type: schema.TypeList, - Description: "Optional Enterprise Search resource definition", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "elasticsearch_cluster_ref_id": { - Type: schema.TypeString, - Default: "main-elasticsearch", - Optional: true, - }, - "ref_id": { - Type: schema.TypeString, - Default: "main-enterprise_search", - Optional: true, - }, - "resource_id": { - Type: schema.TypeString, - Computed: true, - }, - "version": { - Type: schema.TypeString, - Computed: true, - }, - "region": { - Type: schema.TypeString, - Computed: true, - }, - "http_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "https_endpoint": { - Type: schema.TypeString, - Computed: true, - }, - "topology": { - Type: schema.TypeList, - Optional: true, - Computed: true, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "instance_configuration_id": { - Type: schema.TypeString, - Optional: true, - Computed: true, - }, - "size": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "size_resource": { - Type: schema.TypeString, - Description: `Optional size type, defaults to "memory".`, - Default: "memory", - Optional: true, - }, - "zone_count": { - Type: schema.TypeInt, - Computed: true, - Optional: true, - }, - - // Node types - - 
"node_type_appserver": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_connector": { - Type: schema.TypeBool, - Computed: true, - }, - "node_type_worker": { - Type: schema.TypeBool, - Computed: true, - }, - }, - }, - }, - - "config": { - Type: schema.TypeList, - Optional: true, - MaxItems: 1, - DiffSuppressFunc: suppressMissingOptionalConfigurationBlock, - Description: `Optionally define the Enterprise Search configuration options for the Enterprise Search Server`, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "user_settings_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_yaml' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (This field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_json": { - Type: schema.TypeString, - Description: `An arbitrary JSON object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_yaml' is allowed), ie in addition to the documented 'system_settings'. (This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing ECE admins owners to set clusters' parameters (only one of this and 'user_settings_override_json' is allowed), ie in addition to the documented 'system_settings'. 
(This field together with 'system_settings' and 'user_settings*' defines the total set of resource settings)`, - Optional: true, - }, - "user_settings_override_yaml": { - Type: schema.TypeString, - Description: `An arbitrary YAML object allowing (non-admin) cluster owners to set their parameters (only one of this and 'user_settings_json' is allowed), provided they are on the whitelist ('user_settings_whitelist') and not on the blacklist ('user_settings_blacklist'). (These field together with 'user_settings_override*' and 'system_settings' defines the total set of resource settings)`, - Optional: true, - }, - }, - }, - }, - }, - }, - }, - - // Settings - "traffic_filter": { - Description: "Optional list of traffic filters to apply to this deployment.", - // This field is a TypeSet since the order of the items isn't - // important, but the unique list is. This prevents infinite loops - // for autogenerated IDs. - Type: schema.TypeSet, - Set: schema.HashString, - Optional: true, - MinItems: 1, - Elem: &schema.Schema{ - MinItems: 1, - Type: schema.TypeString, - }, - }, - "observability": { - Type: schema.TypeList, - Description: "Optional observability settings. 
Ship logs and metrics to a dedicated deployment.", - Optional: true, - MaxItems: 1, - Elem: &schema.Resource{ - Schema: map[string]*schema.Schema{ - "deployment_id": { - Type: schema.TypeString, - Required: true, - }, - "ref_id": { - Type: schema.TypeString, - Computed: true, - Optional: true, - }, - "logs": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - "metrics": { - Type: schema.TypeBool, - Optional: true, - Default: true, - }, - }, - }, - }, - "tags": { - Description: "Optional map of deployment tags", - Type: schema.TypeMap, - Optional: true, - Elem: &schema.Schema{ - Type: schema.TypeString, - }, - }, - }} -} diff --git a/ec/ecresource/deploymentresource/stopped_resource.go b/ec/ecresource/deploymentresource/stopped_resource.go deleted file mode 100644 index 9d09bf52f..000000000 --- a/ec/ecresource/deploymentresource/stopped_resource.go +++ /dev/null @@ -1,50 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import "github.com/elastic/cloud-sdk-go/pkg/models" - -// isApmResourceStopped returns true if the resource is stopped. 
-func isApmResourceStopped(res *models.ApmResourceInfo) bool { - return res == nil || res.Info == nil || res.Info.Status == nil || - *res.Info.Status == "stopped" -} - -// isIntegrationsServerResourceStopped returns true if the resource is stopped. -func isIntegrationsServerResourceStopped(res *models.IntegrationsServerResourceInfo) bool { - return res == nil || res.Info == nil || res.Info.Status == nil || - *res.Info.Status == "stopped" -} - -// isEsResourceStopped returns true if the resource is stopped. -func isEsResourceStopped(res *models.ElasticsearchResourceInfo) bool { - return res == nil || res.Info == nil || res.Info.Status == nil || - *res.Info.Status == "stopped" -} - -// isEssResourceStopped returns true if the resource is stopped. -func isEssResourceStopped(res *models.EnterpriseSearchResourceInfo) bool { - return res == nil || res.Info == nil || res.Info.Status == nil || - *res.Info.Status == "stopped" -} - -// isKibanaResourceStopped returns true if the resource is stopped. -func isKibanaResourceStopped(res *models.KibanaResourceInfo) bool { - return res == nil || res.Info == nil || res.Info.Status == nil || - *res.Info.Status == "stopped" -} diff --git a/ec/ecresource/deploymentresource/stopped_resource_test.go b/ec/ecresource/deploymentresource/stopped_resource_test.go deleted file mode 100644 index 6b987a49f..000000000 --- a/ec/ecresource/deploymentresource/stopped_resource_test.go +++ /dev/null @@ -1,154 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" -) - -func Test_isApmResourceStopped(t *testing.T) { - type args struct { - res *models.ApmResourceInfo - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "started resource returns false", - args: args{res: &models.ApmResourceInfo{Info: &models.ApmInfo{ - Status: ec.String("started"), - }}}, - want: false, - }, - { - name: "stopped resource returns true", - args: args{res: &models.ApmResourceInfo{Info: &models.ApmInfo{ - Status: ec.String("stopped"), - }}}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := isApmResourceStopped(tt.args.res) - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_isEsResourceStopped(t *testing.T) { - type args struct { - res *models.ElasticsearchResourceInfo - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "started resource returns false", - args: args{res: &models.ElasticsearchResourceInfo{Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("started"), - }}}, - want: false, - }, - { - name: "stopped resource returns true", - args: args{res: &models.ElasticsearchResourceInfo{Info: &models.ElasticsearchClusterInfo{ - Status: ec.String("stopped"), - }}}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := isEsResourceStopped(tt.args.res) - assert.Equal(t, tt.want, 
got) - }) - } -} - -func Test_isEssResourceStopped(t *testing.T) { - type args struct { - res *models.EnterpriseSearchResourceInfo - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "started resource returns false", - args: args{res: &models.EnterpriseSearchResourceInfo{Info: &models.EnterpriseSearchInfo{ - Status: ec.String("started"), - }}}, - want: false, - }, - { - name: "stopped resource returns true", - args: args{res: &models.EnterpriseSearchResourceInfo{Info: &models.EnterpriseSearchInfo{ - Status: ec.String("stopped"), - }}}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := isEssResourceStopped(tt.args.res) - assert.Equal(t, tt.want, got) - }) - } -} - -func Test_isKibanaResourceStopped(t *testing.T) { - type args struct { - res *models.KibanaResourceInfo - } - tests := []struct { - name string - args args - want bool - }{ - { - name: "started resource returns false", - args: args{res: &models.KibanaResourceInfo{Info: &models.KibanaClusterInfo{ - Status: ec.String("started"), - }}}, - want: false, - }, - { - name: "stopped resource returns true", - args: args{res: &models.KibanaResourceInfo{Info: &models.KibanaClusterInfo{ - Status: ec.String("stopped"), - }}}, - want: true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := isKibanaResourceStopped(tt.args.res) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-create-expected-payload.json b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-create-expected-payload.json new file mode 100644 index 000000000..2e7fcbc29 --- /dev/null +++ b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-create-expected-payload.json @@ -0,0 +1,214 @@ +{ + "metadata": { + "tags": [] + }, + "name": "my_deployment_name", + "resources": { + "apm": null, + "appsearch": null, + 
"elasticsearch": [ + { + "plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "id": "coordinating", + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "size": { + "resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 2 + }, + { + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "id": "hot_content", + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "size": { + "resource": "memory", + "value": 8192 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "zone_count": 2 + }, + { + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "elasticsearch": { + "node_attributes": { + "data": "warm" + } + }, + "id": "warm", + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "size": { + "resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 2 + }, + { + "autoscaling_max": { + "resource": "memory", + "value": 59392 + }, + "elasticsearch": { + "node_attributes": { + "data": "cold" + } + }, + "id": "cold", + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "size": { + "resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 1 + }, + { + "autoscaling_max": { + "resource": "memory", + "value": 122880 + }, + "elasticsearch": { + "node_attributes": { + "data": "frozen" + } + }, + "id": "frozen", + 
"instance_configuration_id": "aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "size": { + "resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 1 + }, + { + "id": "master", + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "size": { + "resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 3 + }, + { + "autoscaling_max": { + "resource": "memory", + "value": 61440 + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "id": "ml", + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "size": { + "resource": "memory", + "value": 0 + }, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "zone_count": 1 + } + ], + "deployment_template": { + "id": "aws-io-optimized-v2" + }, + "elasticsearch": { + "version": "8.4.3" + } + }, + "ref_id": "main-elasticsearch", + "region": "us-east-1", + "settings": { + "dedicated_masters_threshold": 6 + } + } + ], + "enterprise_search": null, + "integrations_server": null, + "kibana": null + }, + "settings": {} + } diff --git a/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment1.json b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment1.json new file mode 100644 index 000000000..cfd220d12 --- /dev/null +++ b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment1.json @@ -0,0 +1,767 @@ +{ + "name": "my_deployment_name", + "settings": { + "autoscaling_enabled": false + }, + "healthy": true, + "alias": "my-deployment-name", + "id": "accd2e61fa835a5a32bb6b2938ce91f3", + "resources": { + "enterprise_search": [], + "kibana": [], + "elasticsearch": [ + { + 
"info": { + "status": "started", + "associated_apm_clusters": [], + "associated_kibana_clusters": [], + "locked": false, + "links": {}, + "associated_enterprise_search_clusters": [], + "healthy": true, + "associated_appsearch_clusters": [], + "region": "us-east-1", + "snapshots": { + "healthy": true, + "count": 0, + "recent_success": false + }, + "cluster_name": "my_deployment_name", + "plan_info": { + "healthy": true, + "current": { + "attempt_end_time": "2022-10-06T09:47:29.673Z", + "warnings": [], + "healthy": true, + "source": { + "action": "deployments.create-deployment", + "date": "2022-10-06T09:45:59.875Z", + "user_id": "111111", + "facilitator": "adminconsole", + "remote_addresses": [ + "18.192.28.203", + "3.88.142.49" + ] + }, + "plan_attempt_log": [ + { + "status": "success", + "started": "2022-10-06T09:46:00.619Z", + "duration_in_millis": 17, + "completed": "2022-10-06T09:46:00.636Z", + "step_id": "plan-validator", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.640Z", + "duration_in_millis": 2, + "completed": "2022-10-06T09:46:00.642Z", + "step_id": "log-initial-plan-data", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.647Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.650Z", + "step_id": "detect-plan-strategy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.654Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:00.661Z", + "step_id": "calculate-incremental-elasticsearch-change", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.666Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:00.670Z", + "step_id": "resolve-instances-acls", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.673Z", + "duration_in_millis": 4, + 
"completed": "2022-10-06T09:46:00.677Z", + "step_id": "validate-plan-safety", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.680Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.683Z", + "step_id": "validate-elasticsearch-plugin-versions", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.687Z", + "duration_in_millis": 11, + "completed": "2022-10-06T09:46:00.698Z", + "step_id": "ensure-shield-system-key", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.702Z", + "duration_in_millis": 76, + "completed": "2022-10-06T09:46:00.778Z", + "step_id": "ensure-app-auth-tokens", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.783Z", + "duration_in_millis": 1008, + "completed": "2022-10-06T09:46:01.791Z", + "step_id": "add-shield-user", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:01.806Z", + "duration_in_millis": 176, + "completed": "2022-10-06T09:46:01.982Z", + "step_id": "validate-plan-prerequisites", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:01.994Z", + "duration_in_millis": 42, + "completed": "2022-10-06T09:46:02.036Z", + "step_id": "suspend-snapshotting", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.086Z", + "duration_in_millis": 33, + "completed": "2022-10-06T09:46:02.119Z", + "step_id": "ensure-s3-resources", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.132Z", + "duration_in_millis": 18, + "completed": "2022-10-06T09:46:02.150Z", + "step_id": "get-snapshot-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.163Z", + 
"duration_in_millis": 17, + "completed": "2022-10-06T09:46:02.180Z", + "step_id": "check-enterprise-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.209Z", + "duration_in_millis": 36, + "completed": "2022-10-06T09:46:02.245Z", + "step_id": "create-elasticsearch-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.309Z", + "duration_in_millis": 239, + "completed": "2022-10-06T09:46:02.548Z", + "step_id": "generate-node-certificates", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.552Z", + "duration_in_millis": 174, + "completed": "2022-10-06T09:46:02.726Z", + "step_id": "allocate-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.731Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:02.735Z", + "step_id": "override-instance-data", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.742Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:02.749Z", + "step_id": "update-initial-master-nodes", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.753Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:46:02.761Z", + "step_id": "seed-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.764Z", + "duration_in_millis": 13, + "completed": "2022-10-06T09:46:02.777Z", + "step_id": "start-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.780Z", + "duration_in_millis": 72645, + "completed": "2022-10-06T09:47:15.425Z", + "step_id": "wait-until-running", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": 
"2022-10-06T09:47:15.436Z", + "duration_in_millis": 30, + "completed": "2022-10-06T09:47:15.466Z", + "step_id": "wait-until-masters-elected", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:15.476Z", + "duration_in_millis": 25, + "completed": "2022-10-06T09:47:15.501Z", + "step_id": "verify-non-split", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:20.585Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:20.593Z", + "step_id": "set-maintenance", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.002Z", + "duration_in_millis": 119, + "completed": "2022-10-06T09:47:27.121Z", + "step_id": "apply-cluster-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.139Z", + "duration_in_millis": 1438, + "completed": "2022-10-06T09:47:28.577Z", + "step_id": "ensure-repository", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.588Z", + "duration_in_millis": 256, + "completed": "2022-10-06T09:47:28.844Z", + "step_id": "ensure-slm-policy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.849Z", + "duration_in_millis": 41, + "completed": "2022-10-06T09:47:28.890Z", + "step_id": "apply-cluster-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.893Z", + "duration_in_millis": 5, + "completed": "2022-10-06T09:47:28.898Z", + "step_id": "apply-monitoring-config", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.902Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:28.910Z", + "step_id": "set-maintenance", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": 
"2022-10-06T09:47:28.915Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:47:28.922Z", + "step_id": "apply-curation-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.927Z", + "duration_in_millis": 252, + "completed": "2022-10-06T09:47:29.179Z", + "step_id": "apply-plan-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.379Z", + "duration_in_millis": 46, + "completed": "2022-10-06T09:47:29.425Z", + "step_id": "post-plan-cleanup", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.430Z", + "duration_in_millis": 19, + "completed": "2022-10-06T09:47:29.449Z", + "step_id": "clean-up", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.673Z", + "duration_in_millis": 0, + "completed": "2022-10-06T09:47:29.673Z", + "step_id": "plan-completed", + "info_log": [], + "stage": "completed" + } + ], + "plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "id": "coordinating", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + }, + { + "zone_count": 2, + "topology_element_control": { + 
"min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "warm" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "id": "warm", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "cold" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 59392 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "id": "cold", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "frozen" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 122880 + }, + "instance_configuration_id": "aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "id": "frozen", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 3, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "id": "master", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "autoscaling_max": { + "resource": "memory", + "value": 61440 + }, + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "id": "ml", + "size": { + "resource": "memory", + "value": 0 + } + } + ], + 
"elasticsearch": { + "version": "8.4.3" + }, + "deployment_template": { + "id": "aws-io-optimized-v2" + } + }, + "plan_attempt_id": "c656a76c-0567-4efc-84e0-ee317912a41e", + "attempt_start_time": "2022-10-06T09:46:00.619Z" + }, + "history": [] + }, + "cluster_id": "0589ddb3acee4641b95833022bf04d2b", + "external_links": [], + "elasticsearch": { + "healthy": true, + "cluster_blocking_issues": { + "healthy": true, + "blocks": [] + }, + "master_info": { + "healthy": true, + "instances_with_no_master": [], + "masters": [ + { + "instances": [ + "tiebreaker-0000000002", + "instance-0000000001", + "instance-0000000000" + ], + "master_node_id": "U3kdKRNCQ3ym06KKyojljQ", + "master_instance_name": "instance-0000000001" + } + ] + }, + "shards_status": { + "status": "green" + }, + "blocking_issues": { + "healthy": true, + "cluster_level": [], + "index_level": [] + }, + "shard_info": { + "healthy": true, + "unavailable_shards": [], + "unavailable_replicas": [], + "available_shards": [] + } + }, + "deployment_id": "accd2e61fa835a5a32bb6b2938ce91f3", + "topology": { + "healthy": true, + "instances": [ + { + "service_roles": [ + "ingest", + "master", + "data" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1b", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000000", + "logical_zone": "zone-0", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + "instance_capacity": 8192, + "memory_pressure": 1 + }, + "disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 117 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-03b043eb9cee5566b", + "service_running": true + }, + { + "service_roles": [ + "ingest", + "master", + "data" + ], + "instance_set_id": "hot_content", + 
"zone": "us-east-1e", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000001", + "logical_zone": "zone-1", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + "instance_capacity": 8192, + "native_memory_pressure": 55 + }, + "disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 0 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-0af729d3a795a93a3", + "service_running": true + }, + { + "service_roles": [ + "master" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1a", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "tiebreaker-0000000002", + "logical_zone": "tiebreaker", + "instance_configuration": { + "resource": "memory", + "id": "aws.master.r5d", + "name": "aws.master.r5d" + }, + "memory": { + "instance_capacity": 1024, + "native_memory_pressure": 79 + }, + "disk": { + "disk_space_available": 2048, + "storage_multiplier": 2.0, + "disk_space_used": 0 + }, + "node_roles": [ + "master", + "voting_only" + ], + "allocator_id": "i-04712f4bbc8e7072e", + "service_running": true + } + ] + }, + "metadata": { + "endpoint": "0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + "sso_deep_linking_supported": false, + "last_modified": "2022-10-06T09:47:29.809Z", + "aliased_endpoint": "my-deployment-name.es.us-east-1.aws.found.io", + "ccr": true, + "version": 20, + "service_url": "https://0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + "aliased_url": "https://my-deployment-name.es.us-east-1.aws.found.io", + "ports": { + "transport_passthrough": 9400, + "http": 9200, + "https": 443 + }, + "cloud_id": "my_deployment_name:someCloudID" + } + }, + "region": "us-east-1", + 
"id": "0589ddb3acee4641b95833022bf04d2b", + "ref_id": "main-elasticsearch" + } + ], + "apm": [], + "appsearch": [], + "integrations_server": [] + }, + "metadata": { + "last_resource_plan_modified": "2022-10-06T09:47:29.673Z", + "tags": [], + "organization_id": "222222", + "last_modified": "2022-10-06T09:47:29.809Z", + "hidden": false, + "system_owned": false, + "owner_id": "111111" + } +} diff --git a/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment2.json b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment2.json new file mode 100644 index 000000000..2956cff32 --- /dev/null +++ b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment2.json @@ -0,0 +1,1313 @@ +{ + "name": "my_deployment_name", + "settings": { + "autoscaling_enabled": false + }, + "healthy": true, + "alias": "my-deployment-name", + "id": "accd2e61fa835a5a32bb6b2938ce91f3", + "resources": { + "enterprise_search": [], + "kibana": [], + "elasticsearch": [ + { + "info": { + "status": "started", + "associated_apm_clusters": [], + "associated_kibana_clusters": [], + "locked": false, + "links": {}, + "associated_enterprise_search_clusters": [], + "healthy": true, + "associated_appsearch_clusters": [], + "region": "us-east-1", + "snapshots": { + "healthy": true, + "count": 0, + "recent_success": false + }, + "cluster_name": "my_deployment_name", + "plan_info": { + "healthy": true, + "current": { + "attempt_end_time": "2022-10-06T09:47:29.673Z", + "warnings": [], + "healthy": true, + "source": { + "action": "deployments.create-deployment", + "date": "2022-10-06T09:45:59.875Z", + "user_id": "111111", + "facilitator": "adminconsole", + "remote_addresses": [ + "18.192.28.203", + "3.88.142.49" + ] + }, + "plan_attempt_log": [ + { + "status": "success", + "started": "2022-10-06T09:46:00.619Z", + "duration_in_millis": 17, + "completed": "2022-10-06T09:46:00.636Z", + "step_id": 
"plan-validator", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.640Z", + "duration_in_millis": 2, + "completed": "2022-10-06T09:46:00.642Z", + "step_id": "log-initial-plan-data", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.647Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.650Z", + "step_id": "detect-plan-strategy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.654Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:00.661Z", + "step_id": "calculate-incremental-elasticsearch-change", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.666Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:00.670Z", + "step_id": "resolve-instances-acls", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.673Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:00.677Z", + "step_id": "validate-plan-safety", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.680Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.683Z", + "step_id": "validate-elasticsearch-plugin-versions", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.687Z", + "duration_in_millis": 11, + "completed": "2022-10-06T09:46:00.698Z", + "step_id": "ensure-shield-system-key", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.702Z", + "duration_in_millis": 76, + "completed": "2022-10-06T09:46:00.778Z", + "step_id": "ensure-app-auth-tokens", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.783Z", + "duration_in_millis": 1008, + "completed": 
"2022-10-06T09:46:01.791Z", + "step_id": "add-shield-user", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:01.806Z", + "duration_in_millis": 176, + "completed": "2022-10-06T09:46:01.982Z", + "step_id": "validate-plan-prerequisites", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:01.994Z", + "duration_in_millis": 42, + "completed": "2022-10-06T09:46:02.036Z", + "step_id": "suspend-snapshotting", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.086Z", + "duration_in_millis": 33, + "completed": "2022-10-06T09:46:02.119Z", + "step_id": "ensure-s3-resources", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.132Z", + "duration_in_millis": 18, + "completed": "2022-10-06T09:46:02.150Z", + "step_id": "get-snapshot-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.163Z", + "duration_in_millis": 17, + "completed": "2022-10-06T09:46:02.180Z", + "step_id": "check-enterprise-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.209Z", + "duration_in_millis": 36, + "completed": "2022-10-06T09:46:02.245Z", + "step_id": "create-elasticsearch-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.309Z", + "duration_in_millis": 239, + "completed": "2022-10-06T09:46:02.548Z", + "step_id": "generate-node-certificates", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.552Z", + "duration_in_millis": 174, + "completed": "2022-10-06T09:46:02.726Z", + "step_id": "allocate-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.731Z", + 
"duration_in_millis": 4, + "completed": "2022-10-06T09:46:02.735Z", + "step_id": "override-instance-data", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.742Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:02.749Z", + "step_id": "update-initial-master-nodes", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.753Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:46:02.761Z", + "step_id": "seed-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.764Z", + "duration_in_millis": 13, + "completed": "2022-10-06T09:46:02.777Z", + "step_id": "start-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.780Z", + "duration_in_millis": 72645, + "completed": "2022-10-06T09:47:15.425Z", + "step_id": "wait-until-running", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:15.436Z", + "duration_in_millis": 30, + "completed": "2022-10-06T09:47:15.466Z", + "step_id": "wait-until-masters-elected", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:15.476Z", + "duration_in_millis": 25, + "completed": "2022-10-06T09:47:15.501Z", + "step_id": "verify-non-split", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:20.585Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:20.593Z", + "step_id": "set-maintenance", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.002Z", + "duration_in_millis": 119, + "completed": "2022-10-06T09:47:27.121Z", + "step_id": "apply-cluster-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.139Z", + 
"duration_in_millis": 1438, + "completed": "2022-10-06T09:47:28.577Z", + "step_id": "ensure-repository", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.588Z", + "duration_in_millis": 256, + "completed": "2022-10-06T09:47:28.844Z", + "step_id": "ensure-slm-policy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.849Z", + "duration_in_millis": 41, + "completed": "2022-10-06T09:47:28.890Z", + "step_id": "apply-cluster-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.893Z", + "duration_in_millis": 5, + "completed": "2022-10-06T09:47:28.898Z", + "step_id": "apply-monitoring-config", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.902Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:28.910Z", + "step_id": "set-maintenance", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.915Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:47:28.922Z", + "step_id": "apply-curation-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.927Z", + "duration_in_millis": 252, + "completed": "2022-10-06T09:47:29.179Z", + "step_id": "apply-plan-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.379Z", + "duration_in_millis": 46, + "completed": "2022-10-06T09:47:29.425Z", + "step_id": "post-plan-cleanup", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.430Z", + "duration_in_millis": 19, + "completed": "2022-10-06T09:47:29.449Z", + "step_id": "clean-up", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.673Z", + 
"duration_in_millis": 0, + "completed": "2022-10-06T09:47:29.673Z", + "step_id": "plan-completed", + "info_log": [], + "stage": "completed" + } + ], + "plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "id": "coordinating", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "warm" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "id": "warm", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "cold" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 59392 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "id": "cold", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { 
+ "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "frozen" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 122880 + }, + "instance_configuration_id": "aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "id": "frozen", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 3, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "id": "master", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "autoscaling_max": { + "resource": "memory", + "value": 61440 + }, + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "id": "ml", + "size": { + "resource": "memory", + "value": 0 + } + } + ], + "elasticsearch": { + "version": "8.4.3" + }, + "deployment_template": { + "id": "aws-io-optimized-v2" + } + }, + "plan_attempt_id": "c656a76c-0567-4efc-84e0-ee317912a41e", + "attempt_start_time": "2022-10-06T09:46:00.619Z" + }, + "history": [ + { + "attempt_end_time": "2022-10-06T09:47:29.673Z", + "plan_attempt_id": "c656a76c-0567-4efc-84e0-ee317912a41e", + "warnings": [], + "healthy": true, + "source": { + "action": "deployments.create-deployment", + "date": "2022-10-06T09:45:59.875Z", + "user_id": "111111", + "facilitator": "adminconsole", + "remote_addresses": [ + "18.192.28.203", + "3.88.142.49" + ] + }, + "plan_attempt_log": [ + { + "status": "success", + "started": "2022-10-06T09:46:00.619Z", + "duration_in_millis": 17, + "completed": "2022-10-06T09:46:00.636Z", + "step_id": "plan-validator", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": 
"2022-10-06T09:46:00.640Z", + "duration_in_millis": 2, + "completed": "2022-10-06T09:46:00.642Z", + "step_id": "log-initial-plan-data", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.647Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.650Z", + "step_id": "detect-plan-strategy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.654Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:00.661Z", + "step_id": "calculate-incremental-elasticsearch-change", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.666Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:00.670Z", + "step_id": "resolve-instances-acls", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.673Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:00.677Z", + "step_id": "validate-plan-safety", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.680Z", + "duration_in_millis": 3, + "completed": "2022-10-06T09:46:00.683Z", + "step_id": "validate-elasticsearch-plugin-versions", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.687Z", + "duration_in_millis": 11, + "completed": "2022-10-06T09:46:00.698Z", + "step_id": "ensure-shield-system-key", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.702Z", + "duration_in_millis": 76, + "completed": "2022-10-06T09:46:00.778Z", + "step_id": "ensure-app-auth-tokens", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:00.783Z", + "duration_in_millis": 1008, + "completed": "2022-10-06T09:46:01.791Z", + "step_id": "add-shield-user", + "info_log": [], + "stage": "completed" + }, + 
{ + "status": "success", + "started": "2022-10-06T09:46:01.806Z", + "duration_in_millis": 176, + "completed": "2022-10-06T09:46:01.982Z", + "step_id": "validate-plan-prerequisites", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:01.994Z", + "duration_in_millis": 42, + "completed": "2022-10-06T09:46:02.036Z", + "step_id": "suspend-snapshotting", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.086Z", + "duration_in_millis": 33, + "completed": "2022-10-06T09:46:02.119Z", + "step_id": "ensure-s3-resources", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.132Z", + "duration_in_millis": 18, + "completed": "2022-10-06T09:46:02.150Z", + "step_id": "get-snapshot-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.163Z", + "duration_in_millis": 17, + "completed": "2022-10-06T09:46:02.180Z", + "step_id": "check-enterprise-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.209Z", + "duration_in_millis": 36, + "completed": "2022-10-06T09:46:02.245Z", + "step_id": "create-elasticsearch-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.309Z", + "duration_in_millis": 239, + "completed": "2022-10-06T09:46:02.548Z", + "step_id": "generate-node-certificates", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.552Z", + "duration_in_millis": 174, + "completed": "2022-10-06T09:46:02.726Z", + "step_id": "allocate-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.731Z", + "duration_in_millis": 4, + "completed": "2022-10-06T09:46:02.735Z", + "step_id": "override-instance-data", + "info_log": [], + 
"stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.742Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:46:02.749Z", + "step_id": "update-initial-master-nodes", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.753Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:46:02.761Z", + "step_id": "seed-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.764Z", + "duration_in_millis": 13, + "completed": "2022-10-06T09:46:02.777Z", + "step_id": "start-instances", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:46:02.780Z", + "duration_in_millis": 72645, + "completed": "2022-10-06T09:47:15.425Z", + "step_id": "wait-until-running", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:15.436Z", + "duration_in_millis": 30, + "completed": "2022-10-06T09:47:15.466Z", + "step_id": "wait-until-masters-elected", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:15.476Z", + "duration_in_millis": 25, + "completed": "2022-10-06T09:47:15.501Z", + "step_id": "verify-non-split", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:20.585Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:20.593Z", + "step_id": "set-maintenance", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.002Z", + "duration_in_millis": 119, + "completed": "2022-10-06T09:47:27.121Z", + "step_id": "apply-cluster-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:27.139Z", + "duration_in_millis": 1438, + "completed": "2022-10-06T09:47:28.577Z", + "step_id": "ensure-repository", + "info_log": [], + "stage": 
"completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.588Z", + "duration_in_millis": 256, + "completed": "2022-10-06T09:47:28.844Z", + "step_id": "ensure-slm-policy", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.849Z", + "duration_in_millis": 41, + "completed": "2022-10-06T09:47:28.890Z", + "step_id": "apply-cluster-license", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.893Z", + "duration_in_millis": 5, + "completed": "2022-10-06T09:47:28.898Z", + "step_id": "apply-monitoring-config", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.902Z", + "duration_in_millis": 8, + "completed": "2022-10-06T09:47:28.910Z", + "step_id": "set-maintenance", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.915Z", + "duration_in_millis": 7, + "completed": "2022-10-06T09:47:28.922Z", + "step_id": "apply-curation-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:28.927Z", + "duration_in_millis": 252, + "completed": "2022-10-06T09:47:29.179Z", + "step_id": "apply-plan-settings", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.379Z", + "duration_in_millis": 46, + "completed": "2022-10-06T09:47:29.425Z", + "step_id": "post-plan-cleanup", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.430Z", + "duration_in_millis": 19, + "completed": "2022-10-06T09:47:29.449Z", + "step_id": "clean-up", + "info_log": [], + "stage": "completed" + }, + { + "status": "success", + "started": "2022-10-06T09:47:29.673Z", + "duration_in_millis": 0, + "completed": "2022-10-06T09:47:29.673Z", + "step_id": "plan-completed", + "info_log": [], + "stage": "completed" + } + ], + 
"plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "id": "coordinating", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "warm" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "id": "warm", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "cold" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 59392 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "id": "cold", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "node_attributes": { + "data": "frozen" + } + }, + "autoscaling_max": { + 
"resource": "memory", + "value": 122880 + }, + "instance_configuration_id": "aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "id": "frozen", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 3, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "id": "master", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "autoscaling_max": { + "resource": "memory", + "value": 61440 + }, + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "id": "ml", + "size": { + "resource": "memory", + "value": 0 + } + } + ], + "elasticsearch": { + "version": "8.4.3" + }, + "deployment_template": { + "id": "aws-io-optimized-v2" + } + }, + "plan_attempt_name": "attempt-0000000000", + "attempt_start_time": "2022-10-06T09:46:00.636Z" + } + ] + }, + "cluster_id": "0589ddb3acee4641b95833022bf04d2b", + "external_links": [], + "elasticsearch": { + "healthy": true, + "cluster_blocking_issues": { + "healthy": true, + "blocks": [] + }, + "master_info": { + "healthy": true, + "instances_with_no_master": [], + "masters": [ + { + "instances": [ + "tiebreaker-0000000002", + "instance-0000000001", + "instance-0000000000" + ], + "master_node_id": "U3kdKRNCQ3ym06KKyojljQ", + "master_instance_name": "instance-0000000001" + } + ] + }, + "shards_status": { + "status": "green" + }, + "blocking_issues": { + "healthy": true, + "cluster_level": [], + "index_level": [] + }, + "shard_info": { + "healthy": true, + "unavailable_shards": [], + "unavailable_replicas": [], + "available_shards": [] + } + }, + "deployment_id": "accd2e61fa835a5a32bb6b2938ce91f3", + "topology": { + "healthy": true, + 
"instances": [ + { + "service_roles": [ + "ingest", + "master", + "data" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1b", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000000", + "logical_zone": "zone-0", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + "instance_capacity": 8192, + "memory_pressure": 1 + }, + "disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 117 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-03b043eb9cee5566b", + "service_running": true + }, + { + "service_roles": [ + "ingest", + "master", + "data" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1e", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000001", + "logical_zone": "zone-1", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + "instance_capacity": 8192, + "native_memory_pressure": 55 + }, + "disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 0 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-0af729d3a795a93a3", + "service_running": true + }, + { + "service_roles": [ + "master" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1a", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "tiebreaker-0000000002", + "logical_zone": "tiebreaker", + "instance_configuration": { + "resource": "memory", + "id": "aws.master.r5d", + "name": "aws.master.r5d" + }, + 
"memory": { + "instance_capacity": 1024, + "memory_pressure": 10, + "native_memory_pressure": 79 + }, + "disk": { + "disk_space_available": 2048, + "storage_multiplier": 2.0, + "disk_space_used": 0 + }, + "node_roles": [ + "master", + "voting_only" + ], + "allocator_id": "i-04712f4bbc8e7072e", + "service_running": true + } + ] + }, + "metadata": { + "endpoint": "0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + "sso_deep_linking_supported": false, + "last_modified": "2022-10-06T09:47:29.809Z", + "aliased_endpoint": "my-deployment-name.es.us-east-1.aws.found.io", + "ccr": true, + "version": 20, + "service_url": "https://0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + "aliased_url": "https://my-deployment-name.es.us-east-1.aws.found.io", + "ports": { + "transport_passthrough": 9400, + "http": 9200, + "https": 443 + }, + "cloud_id": "my_deployment_name:someCloudID" + } + }, + "region": "us-east-1", + "id": "0589ddb3acee4641b95833022bf04d2b", + "ref_id": "main-elasticsearch" + } + ], + "apm": [], + "appsearch": [], + "integrations_server": [] + }, + "metadata": { + "last_resource_plan_modified": "2022-10-06T09:47:29.673Z", + "tags": [], + "organization_id": "222222", + "last_modified": "2022-10-06T09:47:29.809Z", + "hidden": false, + "system_owned": false, + "owner_id": "1111" + } +} diff --git a/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json new file mode 100644 index 000000000..846e7f822 --- /dev/null +++ b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2-empty-config-expected-deployment3.json @@ -0,0 +1,669 @@ +{ + "name": "my_deployment_name", + "settings": { + "autoscaling_enabled": false + }, + "healthy": true, + "alias": "my-deployment-name", + "id": "accd2e61fa835a5a32bb6b2938ce91f3", + "resources": { + "enterprise_search": [], + "kibana": [], + "elasticsearch": [ + { + 
"info": { + "status": "started", + "associated_apm_clusters": [], + "associated_kibana_clusters": [], + "locked": false, + "links": {}, + "associated_enterprise_search_clusters": [], + "settings": { + "trust": { + "accounts": [ + { + "trust_all": true, + "account_id": "222222", + "name": "Default trust for own organization" + } + ] + }, + "curation": { + "specs": [] + }, + "dedicated_masters_threshold": 6, + "snapshot": { + "slm": true, + "enabled": true, + "suspended": [], + "repository": { + "static": { + "repository_type": "s3-resource", + "settings": { + "aws_account": "operations-40-us-east-1", + "region": "us-east-1", + "bucket_name": "edf5c1f724604fe6b4ab7757509400c6", + "client_name": "elastic-internal-0589dd" + } + } + }, + "retention": {} + }, + "metadata": { + "name": "my_deployment_name", + "organization_id": "222222", + "subscription_level": "standard", + "hidden": false, + "system_owned": false, + "resources": { + "cpu": { + "boost": true, + "hard_limit": true + } + }, + "owner_id": "111111" + } + }, + "healthy": true, + "associated_appsearch_clusters": [], + "region": "us-east-1", + "snapshots": { + "healthy": true, + "count": 0, + "recent_success": false + }, + "cluster_name": "my_deployment_name", + "plan_info": { + "healthy": true, + "current": { + "attempt_end_time": "2022-10-06T09:47:29.673Z", + "warnings": [], + "healthy": true, + "source": { + "action": "deployments.create-deployment", + "date": "2022-10-06T09:45:59.875Z", + "user_id": "111111", + "facilitator": "adminconsole", + "remote_addresses": [ + "18.192.28.203", + "3.88.142.49" + ] + }, + "plan_attempt_log": [], + "plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + 
"monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + } + }, + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "id": "coordinating", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + }, + "node_attributes": { + "data": "hot" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + 
"enable_close_index": true + }, + "node_attributes": { + "data": "warm" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "id": "warm", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + }, + "node_attributes": { + "data": "cold" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 59392 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "id": "cold", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + }, + "node_attributes": { + "data": "frozen" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 122880 + }, + "instance_configuration_id": 
"aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "id": "frozen", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 3, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + } + }, + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "id": "master", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "elasticsearch": { + "system_settings": { + "reindex_whitelist": [], + "http": { + "cors_enabled": false, + "cors_allow_credentials": false, + "cors_max_age": 1728000, + "compression": true + }, + "monitoring_history_duration": "3d", + "monitoring_collection_interval": -1, + "destructive_requires_name": false, + "auto_create_index": true, + "scripting": { + "inline": { + "enabled": true + }, + "stored": { + "enabled": true + } + }, + "enable_close_index": true + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 61440 + }, + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "id": "ml", + "size": { + "resource": "memory", + "value": 0 + } + } + ], + "tiebreaker_topology": { + "memory_per_node": 1024 + }, + "transient": { + "plan_configuration": { + "move_allocators": [], + "skip_upgrade_checker": false, + "reallocate_instances": 
false, + "skip_post_upgrade_steps": false, + "skip_snapshot": false, + "preferred_allocators": [], + "skip_data_migration": false, + "calm_wait_time": 5, + "timeout": 32768, + "override_failsafe": false, + "move_only": false, + "extended_maintenance": false, + "max_snapshot_attempts": 3, + "move_instances": [], + "max_snapshot_age": 300, + "skip_snapshot_post_major_upgrade": false + }, + "strategy": { + "autodetect": {} + } + }, + "elasticsearch": { + "version": "8.4.3" + }, + "deployment_template": { + "id": "aws-io-optimized-v2" + } + }, + "plan_attempt_id": "c656a76c-0567-4efc-84e0-ee317912a41e", + "attempt_start_time": "2022-10-06T09:46:00.619Z" + }, + "history": [] + }, + "cluster_id": "0589ddb3acee4641b95833022bf04d2b", + "external_links": [], + "system_alerts": [], + "elasticsearch": { + "healthy": true, + "cluster_blocking_issues": { + "healthy": true, + "blocks": [] + }, + "master_info": { + "healthy": true, + "instances_with_no_master": [], + "masters": [ + { + "instances": [ + "tiebreaker-0000000002", + "instance-0000000001", + "instance-0000000000" + ], + "master_node_id": "U3kdKRNCQ3ym06KKyojljQ", + "master_instance_name": "instance-0000000001" + } + ] + }, + "shards_status": { + "status": "green" + }, + "blocking_issues": { + "healthy": true, + "cluster_level": [], + "index_level": [] + }, + "shard_info": { + "healthy": true, + "unavailable_shards": [], + "unavailable_replicas": [], + "available_shards": [] + } + }, + "deployment_id": "accd2e61fa835a5a32bb6b2938ce91f3", + "topology": { + "healthy": true, + "instances": [ + { + "service_roles": [ + "ingest", + "master", + "data" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1b", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000000", + "logical_zone": "zone-0", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + 
"instance_capacity": 8192, + "memory_pressure": 1 + }, + "disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 117 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-03b043eb9cee5566b", + "service_running": true + }, + { + "service_roles": [ + "ingest", + "master", + "data" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1e", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "instance-0000000001", + "logical_zone": "zone-1", + "instance_configuration": { + "resource": "memory", + "id": "aws.data.highio.i3", + "name": "aws.data.highio.i3" + }, + "memory": { + "instance_capacity": 8192, + "memory_pressure": 1, + "native_memory_pressure": 55 + }, + "disk": { + "disk_space_available": 245760, + "storage_multiplier": 30.0, + "disk_space_used": 117 + }, + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "allocator_id": "i-0af729d3a795a93a3", + "service_running": true + }, + { + "service_roles": [ + "master" + ], + "instance_set_id": "hot_content", + "zone": "us-east-1a", + "container_started": true, + "service_version": "8.4.3", + "healthy": true, + "maintenance_mode": false, + "instance_name": "tiebreaker-0000000002", + "logical_zone": "tiebreaker", + "instance_configuration": { + "resource": "memory", + "id": "aws.master.r5d", + "name": "aws.master.r5d" + }, + "memory": { + "instance_capacity": 1024, + "memory_pressure": 10, + "native_memory_pressure": 79 + }, + "disk": { + "disk_space_available": 2048, + "storage_multiplier": 2.0, + "disk_space_used": 0 + }, + "node_roles": [ + "master", + "voting_only" + ], + "allocator_id": "i-04712f4bbc8e7072e", + "service_running": true + } + ] + }, + "metadata": { + "endpoint": "0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + 
"sso_deep_linking_supported": false, + "last_modified": "2022-10-06T09:47:29.809Z", + "aliased_endpoint": "my-deployment-name.es.us-east-1.aws.found.io", + "ccr": true, + "version": 20, + "service_url": "https://0589ddb3acee4641b95833022bf04d2b.us-east-1.aws.found.io", + "aliased_url": "https://my-deployment-name.es.us-east-1.aws.found.io", + "ports": { + "transport_passthrough": 9400, + "http": 9200, + "https": 443 + }, + "cloud_id": "my_deployment_name:someCloudID" + } + }, + "region": "us-east-1", + "id": "0589ddb3acee4641b95833022bf04d2b", + "ref_id": "main-elasticsearch" + } + ], + "apm": [], + "appsearch": [], + "integrations_server": [] + }, + "metadata": { + "last_resource_plan_modified": "2022-10-06T09:47:29.673Z", + "tags": [], + "organization_id": "222222", + "subscription_level": "standard", + "last_modified": "2022-10-06T09:47:29.809Z", + "hidden": false, + "system_owned": false, + "owner_id": "111111" + } +} diff --git a/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2.json b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2.json new file mode 100644 index 000000000..12d6a363b --- /dev/null +++ b/ec/ecresource/deploymentresource/testdata/aws-io-optimized-v2.json @@ -0,0 +1,363 @@ +{ + "instance_configurations": [], + "description": "Use for for all-purpose workloads, including time-series data like logs and metrics.", + "name": "I/O Optimized", + "template_category_id": "io-optimized", + "kibana_deeplink": [ + { + "semver": ">=7.9.0", + "uri": "/app/home" + }, + { + "semver": "<7.9.0", + "uri": "/app/kibana#/home" + } + ], + "id": "aws-io-optimized-v2", + "deployment_template": { + "resources": { + "integrations_server": [ + { + "elasticsearch_cluster_ref_id": "es-ref-id", + "region": "us-east-1", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "aws.integrationsserver.r5d", + "zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "integrations_server": {} + }, + "ref_id": 
"integrations_server-ref-id" + } + ], + "elasticsearch": [ + { + "region": "us-east-1", + "settings": { + "dedicated_masters_threshold": 6 + }, + "plan": { + "autoscaling_enabled": false, + "cluster_topology": [ + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "node_type": { + "data": false, + "master": false, + "ingest": true + }, + "instance_configuration_id": "aws.coordinating.m5d", + "node_roles": [ + "ingest", + "remote_cluster_client" + ], + "id": "coordinating", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 1024 + } + }, + "node_type": { + "data": true, + "master": true, + "ingest": true + }, + "elasticsearch": { + "node_attributes": { + "data": "hot" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highio.i3", + "node_roles": [ + "master", + "ingest", + "transform", + "data_hot", + "remote_cluster_client", + "data_content" + ], + "id": "hot_content", + "size": { + "resource": "memory", + "value": 8192 + } + }, + { + "zone_count": 2, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "node_type": { + "data": true, + "master": false, + "ingest": false + }, + "elasticsearch": { + "node_attributes": { + "data": "warm" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 118784 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_warm", + "remote_cluster_client" + ], + "id": "warm", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "node_type": { + "data": true, + "master": false, + "ingest": false + }, + "elasticsearch": { + "node_attributes": { + "data": "cold" + } + }, + "autoscaling_max": { + "resource": 
"memory", + "value": 59392 + }, + "instance_configuration_id": "aws.data.highstorage.d3", + "node_roles": [ + "data_cold", + "remote_cluster_client" + ], + "id": "cold", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "node_type": { + "data": true, + "master": false, + "ingest": false + }, + "elasticsearch": { + "node_attributes": { + "data": "frozen" + } + }, + "autoscaling_max": { + "resource": "memory", + "value": 122880 + }, + "instance_configuration_id": "aws.es.datafrozen.i3en", + "node_roles": [ + "data_frozen" + ], + "id": "frozen", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 3, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "node_type": { + "data": false, + "master": true, + "ingest": false + }, + "instance_configuration_id": "aws.master.r5d", + "node_roles": [ + "master", + "remote_cluster_client" + ], + "id": "master", + "size": { + "resource": "memory", + "value": 0 + } + }, + { + "zone_count": 1, + "topology_element_control": { + "min": { + "resource": "memory", + "value": 0 + } + }, + "autoscaling_min": { + "resource": "memory", + "value": 0 + }, + "node_type": { + "data": false, + "master": false, + "ingest": false, + "ml": true + }, + "autoscaling_max": { + "resource": "memory", + "value": 61440 + }, + "instance_configuration_id": "aws.ml.m5d", + "node_roles": [ + "ml", + "remote_cluster_client" + ], + "id": "ml", + "size": { + "resource": "memory", + "value": 0 + } + } + ], + "elasticsearch": {} + }, + "ref_id": "es-ref-id" + } + ], + "enterprise_search": [ + { + "elasticsearch_cluster_ref_id": "es-ref-id", + "region": "us-east-1", + "plan": { + "cluster_topology": [ + { + "node_type": { + "connector": true, + "appserver": true, + "worker": true + }, + "instance_configuration_id": "aws.enterprisesearch.m5d", + "zone_count": 2, + "size": { + 
"resource": "memory", + "value": 0 + } + } + ], + "enterprise_search": {} + }, + "ref_id": "enterprise_search-ref-id" + } + ], + "kibana": [ + { + "elasticsearch_cluster_ref_id": "es-ref-id", + "region": "us-east-1", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "aws.kibana.r5d", + "zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "kibana": {} + }, + "ref_id": "kibana-ref-id" + } + ], + "apm": [ + { + "elasticsearch_cluster_ref_id": "es-ref-id", + "region": "us-east-1", + "plan": { + "cluster_topology": [ + { + "instance_configuration_id": "aws.apm.r5d", + "zone_count": 1, + "size": { + "resource": "memory", + "value": 1024 + } + } + ], + "apm": {} + }, + "ref_id": "apm-ref-id" + } + ] + }, + "settings": { + "autoscaling_enabled": false + } + }, + "system_owned": true, + "metadata": [ + { + "value": "true", + "key": "hidden" + }, + { + "value": "aws-hot-warm-v2", + "key": "hot_warm_template" + }, + { + "value": "true", + "key": "recommended" + }, + { + "value": "true", + "key": "trial-eligible" + }, + { + "value": "stack", + "key": "parent_solution" + } + ] +} diff --git a/ec/ecresource/deploymentresource/testutil/testutil_func.go b/ec/ecresource/deploymentresource/testutil/testutil_func.go new file mode 100644 index 000000000..3a1fbe739 --- /dev/null +++ b/ec/ecresource/deploymentresource/testutil/testutil_func.go @@ -0,0 +1,51 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package testutil + +import ( + "encoding/json" + "os" + "testing" + + "github.com/elastic/cloud-sdk-go/pkg/models" +) + +// ParseDeploymentTemplate is a test helper which parses a file by path and +// returns a models.DeploymentTemplateInfoV2. +func ParseDeploymentTemplate(t *testing.T, name string) *models.DeploymentTemplateInfoV2 { + t.Helper() + f, err := os.Open(name) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + var res models.DeploymentTemplateInfoV2 + if err := json.NewDecoder(f).Decode(&res); err != nil { + t.Fatal(err) + } + + // Enriches the elasticsearch DT with the current DT. + if len(res.DeploymentTemplate.Resources.Elasticsearch) > 0 { + res.DeploymentTemplate.Resources.Elasticsearch[0].Plan.DeploymentTemplate = &models.DeploymentTemplateReference{ + ID: res.ID, + } + } + + return &res +} diff --git a/ec/ecresource/deploymentresource/testutil_func_test.go b/ec/ecresource/deploymentresource/testutil/testutil_func_test.go similarity index 96% rename from ec/ecresource/deploymentresource/testutil_func_test.go rename to ec/ecresource/deploymentresource/testutil/testutil_func_test.go index 1d07eff67..aa50a0411 100644 --- a/ec/ecresource/deploymentresource/testutil_func_test.go +++ b/ec/ecresource/deploymentresource/testutil/testutil_func_test.go @@ -15,15 +15,16 @@ // specific language governing permissions and limitations // under the License. 
-package deploymentresource +package testutil import ( "os" "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/stretchr/testify/assert" ) func Test_parseDeploymentTemplate(t *testing.T) { @@ -59,7 +60,7 @@ func Test_parseDeploymentTemplate(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := parseDeploymentTemplate(t, tt.args.name) + got := ParseDeploymentTemplate(t, tt.args.name) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/deploymentresource/testutil_datastruct.go b/ec/ecresource/deploymentresource/testutil_datastruct.go deleted file mode 100644 index 13c475d27..000000000 --- a/ec/ecresource/deploymentresource/testutil_datastruct.go +++ /dev/null @@ -1,263 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "github.com/elastic/cloud-sdk-go/pkg/api/mock" -) - -func newSampleDeployment() map[string]interface{} { - return map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-hot-warm-v2", - "region": "us-east-1", - "version": "7.11.1", - "elasticsearch": []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "us-east-1", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - "topology": []interface{}{ - map[string]interface{}{ - "id": "hot_content", - "size": "2g", - "node_roles": []interface{}{ - "master", - "ingest", - "remote_cluster_client", - "data_hot", - "transform", - "data_content", - }, - "zone_count": 1, - }, - map[string]interface{}{ - "id": "warm", - "size": "2g", - "node_roles": []interface{}{ - "data_warm", - "remote_cluster_client", - }, - "zone_count": 1, - }, - }, - }}, - "kibana": []interface{}{newKibanaSample()}, - "apm": []interface{}{newApmSample()}, - "enterprise_search": []interface{}{newEnterpriseSearchSample()}, - "observability": []interface{}{newObservabilitySample()}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - } -} - -func newSampleLegacyDeployment() map[string]interface{} { - return map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{newElasticsearchSample()}, - "kibana": []interface{}{newKibanaSample()}, - "apm": []interface{}{newApmSample()}, - "enterprise_search": []interface{}{newEnterpriseSearchSample()}, - "observability": 
[]interface{}{newObservabilitySample()}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - } -} - -func newSampleDeploymentEmptyRD() map[string]interface{} { - return map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{map[string]interface{}{}}, - "kibana": []interface{}{map[string]interface{}{}}, - "apm": []interface{}{map[string]interface{}{}}, - "enterprise_search": []interface{}{map[string]interface{}{}}, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - } -} - -func newSampleDeploymentOverrides() map[string]interface{} { - return map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{map[string]interface{}{ - "ref_id": "main-elasticsearch", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "size": "4g", - }}}, - }, - "kibana": []interface{}{map[string]interface{}{ - "ref_id": "main-kibana", - "topology": []interface{}{map[string]interface{}{ - "size": "2g", - }}}, - }, - "apm": []interface{}{map[string]interface{}{ - "ref_id": "main-apm", - "topology": []interface{}{map[string]interface{}{ - "size": "1g", - }}}, - }, - "enterprise_search": []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "topology": []interface{}{map[string]interface{}{ - "size": "4g", - }}}, - }, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - } -} - -func newSampleDeploymentOverridesIC() map[string]interface{} { - return map[string]interface{}{ - "alias": "my-deployment", - "name": "my_deployment_name", - "deployment_template_id": "aws-io-optimized-v2", - "region": "us-east-1", - "version": "7.7.0", - "elasticsearch": []interface{}{map[string]interface{}{ - 
"ref_id": "main-elasticsearch", - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - }}}, - }, - "kibana": []interface{}{map[string]interface{}{ - "ref_id": "main-kibana", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - }}}, - }, - "apm": []interface{}{map[string]interface{}{ - "ref_id": "main-apm", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - }}}, - }, - "enterprise_search": []interface{}{map[string]interface{}{ - "ref_id": "main-enterprise_search", - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - }}}, - }, - "traffic_filter": []interface{}{"0.0.0.0/0", "192.168.10.0/24"}, - } -} - -func newElasticsearchSample() map[string]interface{} { - return map[string]interface{}{ - "ref_id": "main-elasticsearch", - "resource_id": mock.ValidClusterID, - "region": "us-east-1", - "config": []interface{}{map[string]interface{}{ - "user_settings_yaml": "some.setting: value", - "user_settings_override_yaml": "some.setting: value2", - "user_settings_json": "{\"some.setting\":\"value\"}", - "user_settings_override_json": "{\"some.setting\":\"value2\"}", - }}, - "topology": []interface{}{map[string]interface{}{ - "id": "hot_content", - "instance_configuration_id": "aws.data.highio.i3", - "size": "2g", - "node_type_data": "true", - "node_type_ingest": "true", - "node_type_master": "true", - "node_type_ml": "false", - "zone_count": 1, - }}, - } -} - -func newKibanaSample() map[string]interface{} { - return map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-kibana", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "us-east-1", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.kibana.r5d", - "size": "1g", - "zone_count": 1, - }, - }, - } -} - -func newApmSample() 
map[string]interface{} { - return map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-apm", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "us-east-1", - // Reproduces the case where the default fields are set. - "config": []interface{}{map[string]interface{}{ - "debug_enabled": false, - }}, - "topology": []interface{}{map[string]interface{}{ - "instance_configuration_id": "aws.apm.r5d", - "size": "0.5g", - "zone_count": 1, - }}, - } -} - -func newEnterpriseSearchSample() map[string]interface{} { - return map[string]interface{}{ - "elasticsearch_cluster_ref_id": "main-elasticsearch", - "ref_id": "main-enterprise_search", - "resource_id": mock.ValidClusterID, - "version": "7.7.0", - "region": "us-east-1", - "topology": []interface{}{ - map[string]interface{}{ - "instance_configuration_id": "aws.enterprisesearch.m5d", - "size": "2g", - "zone_count": 1, - "node_type_appserver": true, - "node_type_connector": true, - "node_type_worker": true, - }, - }, - } -} - -func newObservabilitySample() map[string]interface{} { - return map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "ref_id": "main-elasticsearch", - "logs": true, - "metrics": true, - } -} diff --git a/ec/ecresource/deploymentresource/testutil_func.go b/ec/ecresource/deploymentresource/testutil_func.go deleted file mode 100644 index 945f45f1d..000000000 --- a/ec/ecresource/deploymentresource/testutil_func.go +++ /dev/null @@ -1,126 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "encoding/json" - "io" - "os" - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) - -// parseDeploymentTemplate is a test helper which parse a file by path and -// returns a models.DeploymentTemplateInfoV2. -func parseDeploymentTemplate(t *testing.T, name string) *models.DeploymentTemplateInfoV2 { - t.Helper() - f, err := os.Open(name) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - var res models.DeploymentTemplateInfoV2 - if err := json.NewDecoder(f).Decode(&res); err != nil { - t.Fatal(err) - } - - // Enriches the elasticsearch DT with the current DT. 
- if len(res.DeploymentTemplate.Resources.Elasticsearch) > 0 { - res.DeploymentTemplate.Resources.Elasticsearch[0].Plan.DeploymentTemplate = &models.DeploymentTemplateReference{ - ID: res.ID, - } - } - - return &res -} - -func openDeploymentGet(t *testing.T, name string) *models.DeploymentGetResponse { - t.Helper() - f, err := os.Open(name) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - var res models.DeploymentGetResponse - if err := json.NewDecoder(f).Decode(&res); err != nil { - t.Fatal(err) - } - return &res -} - -func enrichWithEmptyTopologies(tpl, want *models.ElasticsearchPayload) []*models.ElasticsearchPayload { - tpl.DisplayName = want.DisplayName - tpl.RefID = want.RefID - tpl.Region = want.Region - tpl.Settings = want.Settings - tpl.Plan.AutoscalingEnabled = want.Plan.AutoscalingEnabled - tpl.Plan.Elasticsearch = want.Plan.Elasticsearch - tpl.Plan.Transient = want.Plan.Transient - - for i, t := range tpl.Plan.ClusterTopology { - for _, w := range want.Plan.ClusterTopology { - if t.ID == w.ID { - tpl.Plan.ClusterTopology[i] = w - } - } - } - - return []*models.ElasticsearchPayload{tpl} -} - -func readerDeploymentUpdateToESPayload(t *testing.T, rc io.Reader, nr bool, tplID string) *models.ElasticsearchPayload { - t.Helper() - - var tpl models.DeploymentUpdateRequest - if err := json.NewDecoder(rc).Decode(&tpl); err != nil { - t.Fatal(err) - } - - return enrichElasticsearchTemplate( - tpl.Resources.Elasticsearch[0], - tplID, - "", - nr, - ) -} - -func readerToESPayload(t *testing.T, rc io.Reader, nr bool) *models.ElasticsearchPayload { - t.Helper() - - var tpl models.DeploymentTemplateInfoV2 - if err := json.NewDecoder(rc).Decode(&tpl); err != nil { - t.Fatal(err) - } - - return enrichElasticsearchTemplate( - tpl.DeploymentTemplate.Resources.Elasticsearch[0], - *tpl.ID, - "", - nr, - ) -} - -func newDeploymentRD(t *testing.T, id string, raw map[string]interface{}) *schema.ResourceData { - rd := schema.TestResourceDataRaw(t, newSchema(), raw) - 
rd.SetId(id) - return rd -} diff --git a/ec/ecresource/trafficfilterresource/resource.go b/ec/ecresource/deploymentresource/topology/v1/topology.go similarity index 56% rename from ec/ecresource/trafficfilterresource/resource.go rename to ec/ecresource/deploymentresource/topology/v1/topology.go index 25ebfa288..5e97eb169 100644 --- a/ec/ecresource/trafficfilterresource/resource.go +++ b/ec/ecresource/deploymentresource/topology/v1/topology.go @@ -15,30 +15,24 @@ // specific language governing permissions and limitations // under the License. -package trafficfilterresource +package v1 import ( - "time" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/types" ) -// Resource returns the ec_deployment_traffic_filter resource schema. -func Resource() *schema.Resource { - return &schema.Resource{ - Description: "Elastic Cloud deployment traffic filtering rules", - Schema: newSchema(), - - CreateContext: create, - ReadContext: read, - UpdateContext: update, - DeleteContext: delete, +type TopologyTF struct { + InstanceConfigurationId types.String `tfsdk:"instance_configuration_id"` + Size types.String `tfsdk:"size"` + SizeResource types.String `tfsdk:"size_resource"` + ZoneCount types.Int64 `tfsdk:"zone_count"` +} - Importer: &schema.ResourceImporter{ - StateContext: schema.ImportStatePassthroughContext, - }, - Timeouts: &schema.ResourceTimeout{ - Default: schema.DefaultTimeout(10 * time.Minute), - }, - } +type Topology struct { + InstanceConfigurationId *string `tfsdk:"instance_configuration_id"` + Size *string `tfsdk:"size"` + SizeResource *string `tfsdk:"size_resource"` + ZoneCount int `tfsdk:"zone_count"` } + +type Topologies []Topology diff --git a/ec/ecresource/deploymentresource/traffic_filter.go b/ec/ecresource/deploymentresource/traffic_filter.go deleted file mode 100644 index 835bab808..000000000 --- a/ec/ecresource/deploymentresource/traffic_filter.go +++ /dev/null @@ -1,68 +0,0 @@ -// Licensed to 
Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package deploymentresource - -import ( - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -// flattenTrafficFiltering parses a deployment's traffic filtering settings. -func flattenTrafficFiltering(settings *models.DeploymentSettings) *schema.Set { - if settings == nil || settings.TrafficFilterSettings == nil { - return nil - } - - var rules []interface{} - for _, rule := range settings.TrafficFilterSettings.Rulesets { - rules = append(rules, rule) - } - - if len(rules) > 0 { - return schema.NewSet(schema.HashString, rules) - } - - return nil -} - -// expandTrafficFilterCreate expands the flattened "traffic_filter" settings to -// a DeploymentCreateRequest. 
-func expandTrafficFilterCreate(set *schema.Set, req *models.DeploymentCreateRequest) { - if set == nil || req == nil { - return - } - - if set.Len() == 0 { - return - } - - if req.Settings == nil { - req.Settings = &models.DeploymentCreateSettings{} - } - - if req.Settings.TrafficFilterSettings == nil { - req.Settings.TrafficFilterSettings = &models.TrafficFilterSettings{} - } - - req.Settings.TrafficFilterSettings.Rulesets = append( - req.Settings.TrafficFilterSettings.Rulesets, - util.ItemsToString(set.List())..., - ) -} diff --git a/ec/ecresource/deploymentresource/update.go b/ec/ecresource/deploymentresource/update.go index 26f33ab53..8c6498fe9 100644 --- a/ec/ecresource/deploymentresource/update.go +++ b/ec/ecresource/deploymentresource/update.go @@ -19,73 +19,185 @@ package deploymentresource import ( "context" - "strings" "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi" - "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" ) -// Update syncs the remote state with the local. -func updateResource(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) +func (r *Resource) Update(ctx context.Context, req resource.UpdateRequest, resp *resource.UpdateResponse) { + var plan v2.DeploymentTF - if hasDeploymentChange(d) { - if err := updateDeployment(ctx, d, client); err != nil { - return diag.FromErr(err) - } - } + resp.Diagnostics.Append(req.Plan.Get(ctx, &plan)...) 
- if err := handleTrafficFilterChange(d, client); err != nil { - return diag.FromErr(err) + if resp.Diagnostics.HasError() { + return } - if err := handleRemoteClusters(d, client); err != nil { - return diag.FromErr(err) + var state v2.DeploymentTF + + resp.Diagnostics.Append(req.State.Get(ctx, &state)...) + + if resp.Diagnostics.HasError() { + return } - return readResource(ctx, d, meta) -} + updateReq, diags := plan.UpdateRequest(ctx, r.client, state) -func updateDeployment(_ context.Context, d *schema.ResourceData, client *api.API) error { - req, err := updateResourceToModel(d, client) - if err != nil { - return err + if diags.HasError() { + resp.Diagnostics.Append(diags...) + return } res, err := deploymentapi.Update(deploymentapi.UpdateParams{ - API: client, - DeploymentID: d.Id(), - Request: req, + API: r.client, + DeploymentID: plan.Id.Value, + Request: updateReq, Overrides: deploymentapi.PayloadOverrides{ - Version: d.Get("version").(string), - Region: d.Get("region").(string), + Version: plan.Version.Value, + Region: plan.Region.Value, }, }) if err != nil { - return multierror.NewPrefixed("failed updating deployment", err) + resp.Diagnostics.AddError("failed updating deployment", err.Error()) + return } - if err := WaitForPlanCompletion(client, d.Id()); err != nil { - return multierror.NewPrefixed("failed tracking update progress", err) + if err := WaitForPlanCompletion(r.client, plan.Id.Value); err != nil { + resp.Diagnostics.AddError("failed tracking update progress", err.Error()) + return } - return parseCredentials(d, res.Resources) + resp.Diagnostics.Append(HandleTrafficFilterChange(ctx, r.client, plan, state)...) + + resp.Diagnostics.Append(v2.HandleRemoteClusters(ctx, r.client, plan.Id.Value, plan.Elasticsearch)...) + + deployment, diags := r.read(ctx, plan.Id.Value, &state, plan, res.Resources) + + resp.Diagnostics.Append(diags...) 
+ + if deployment == nil { + resp.Diagnostics.AddError("cannot read just updated resource", "") + resp.State.RemoveResource(ctx) + return + } + + resp.Diagnostics.Append(resp.State.Set(ctx, deployment)...) } -// hasDeploymentChange checks if there's any change in the resource attributes -// except in the "traffic_filter" prefixed keys. If so, it returns true. -func hasDeploymentChange(d *schema.ResourceData) bool { - for attr := range d.State().Attributes { - if strings.HasPrefix(attr, "traffic_filter") { - continue +func HandleTrafficFilterChange(ctx context.Context, client *api.API, plan, state v2.DeploymentTF) diag.Diagnostics { + if plan.TrafficFilter.IsNull() || plan.TrafficFilter.Equal(state.TrafficFilter) { + return nil + } + + var planRules, stateRules ruleSet + if diags := plan.TrafficFilter.ElementsAs(ctx, &planRules, true); diags.HasError() { + return diags + } + + if diags := state.TrafficFilter.ElementsAs(ctx, &stateRules, true); diags.HasError() { + return diags + } + + var rulesToAdd, rulesToDelete []string + + for _, rule := range planRules { + if !stateRules.exist(rule) { + rulesToAdd = append(rulesToAdd, rule) + } + } + + for _, rule := range stateRules { + if !planRules.exist(rule) { + rulesToDelete = append(rulesToDelete, rule) } - // Check if any of the resource attributes has a change. 
- if d.HasChange(attr) { + } + + var diags diag.Diagnostics + for _, rule := range rulesToAdd { + if err := associateRule(rule, plan.Id.Value, client); err != nil { + diags.AddError("cannot associate traffic filter rule", err.Error()) + } + } + + for _, rule := range rulesToDelete { + if err := removeRule(rule, plan.Id.Value, client); err != nil { + diags.AddError("cannot remove traffic filter rule", err.Error()) + } + } + + return diags +} + +type ruleSet []string + +func (rs ruleSet) exist(rule string) bool { + for _, r := range rs { + if r == rule { return true } } return false } + +var ( + GetAssociation = trafficfilterapi.Get + CreateAssociation = trafficfilterapi.CreateAssociation + DeleteAssociation = trafficfilterapi.DeleteAssociation +) + +func associateRule(ruleID, deploymentID string, client *api.API) error { + res, err := GetAssociation(trafficfilterapi.GetParams{ + API: client, ID: ruleID, IncludeAssociations: true, + }) + if err != nil { + return err + } + + // When the rule has already been associated, return. + for _, assoc := range res.Associations { + if deploymentID == *assoc.ID { + return nil + } + } + + // Create assignment. + if err := CreateAssociation(trafficfilterapi.CreateAssociationParams{ + API: client, ID: ruleID, EntityType: "deployment", EntityID: deploymentID, + }); err != nil { + return err + } + return nil +} + +func removeRule(ruleID, deploymentID string, client *api.API) error { + res, err := GetAssociation(trafficfilterapi.GetParams{ + API: client, ID: ruleID, IncludeAssociations: true, + }) + + // If the rule is gone (403 or 404), return nil. + if err != nil { + if util.TrafficFilterNotFound(err) { + return nil + } + return err + } + + // If the rule is found, then delete the association. 
+ for _, assoc := range res.Associations { + if deploymentID == *assoc.ID { + return DeleteAssociation(trafficfilterapi.DeleteAssociationParams{ + API: client, + ID: ruleID, + EntityID: *assoc.ID, + EntityType: *assoc.EntityType, + }) + } + } + + return nil +} diff --git a/ec/ecresource/deploymentresource/update_test.go b/ec/ecresource/deploymentresource/update_test.go index e0d6c9d4a..a1e294c8b 100644 --- a/ec/ecresource/deploymentresource/update_test.go +++ b/ec/ecresource/deploymentresource/update_test.go @@ -15,81 +15,200 @@ // specific language governing permissions and limitations // under the License. -package deploymentresource +package deploymentresource_test import ( + "context" + "fmt" "testing" - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" + "github.com/elastic/cloud-sdk-go/pkg/models" + "github.com/elastic/cloud-sdk-go/pkg/util/ec" + "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource" + v2 "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource/deployment/v2" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" ) -func Test_hasDeploymentChange(t *testing.T) { - unchanged := Resource().Data(util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: newSampleLegacyDeployment(), - }).State()) - - changesToTrafficFilter := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "traffic_filter": []interface{}{"1.1.1.1"}, - }, - }) - - changesToName := util.NewResourceData(t, util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{"name": "some name"}, - }) - - changesToRegion := util.NewResourceData(t, 
util.ResDataParams{ - ID: mock.ValidClusterID, - Schema: newSchema(), - State: map[string]interface{}{ - "name": "some name", - "region": "some-region", - }, - }) +func Test_handleTrafficFilterChange(t *testing.T) { + deploymentID := "deployment_unique_id" type args struct { - d *schema.ResourceData + plan []string + state []string } + tests := []struct { - name string - args args - want bool + name string + args args + getRule func(trafficfilterapi.GetParams) (*models.TrafficFilterRulesetInfo, error) + createRule func(params trafficfilterapi.CreateAssociationParams) error + deleteRule func(params trafficfilterapi.DeleteAssociationParams) error }{ { - name: "when a new resource is persisted and has no changes.", - args: args{d: unchanged}, - want: false, + name: "should not call the association API when plan and state contain same rules", + args: args{ + plan: []string{"rule1"}, + state: []string{"rule1"}, + }, + getRule: func(trafficfilterapi.GetParams) (*models.TrafficFilterRulesetInfo, error) { + err := "GetRule function SHOULD NOT be called" + t.Errorf(err) + return nil, fmt.Errorf(err) + }, + createRule: func(params trafficfilterapi.CreateAssociationParams) error { + err := "CreateRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + deleteRule: func(params trafficfilterapi.DeleteAssociationParams) error { + err := "DeleteRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, }, + { - name: "when a new resource has some changes in traffic_filter", - args: args{d: changesToTrafficFilter}, - want: false, + name: "should add rule when plan contains it and state doesn't contain it", + args: args{ + plan: []string{"rule1", "rule2"}, + state: []string{"rule1"}, + }, + getRule: func(trafficfilterapi.GetParams) (*models.TrafficFilterRulesetInfo, error) { + return &models.TrafficFilterRulesetInfo{}, nil + }, + createRule: func(params trafficfilterapi.CreateAssociationParams) error { + assert.Equal(t, "rule2", 
params.ID) + return nil + }, + deleteRule: func(params trafficfilterapi.DeleteAssociationParams) error { + err := "DeleteRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, }, + { - name: "when a new resource is has some changes in name", - args: args{d: changesToName}, - want: true, + name: "should not add rule when plan contains it and state doesn't contain it but the association already exists", + args: args{ + plan: []string{"rule1", "rule2"}, + state: []string{"rule1"}, + }, + getRule: func(trafficfilterapi.GetParams) (*models.TrafficFilterRulesetInfo, error) { + return &models.TrafficFilterRulesetInfo{ + Associations: []*models.FilterAssociation{ + { + ID: &deploymentID, + EntityType: ec.String("deployment"), + }, + }, + }, nil + }, + createRule: func(params trafficfilterapi.CreateAssociationParams) error { + err := "CreateRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + deleteRule: func(params trafficfilterapi.DeleteAssociationParams) error { + err := "DeleteRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, }, + { - name: "when a new resource is has some changes in name", - args: args{d: changesToRegion}, - want: true, + name: "should delete rule when plan doesn't contain it and state does contain it", + args: args{ + plan: []string{"rule1"}, + state: []string{"rule1", "rule2"}, + }, + getRule: func(trafficfilterapi.GetParams) (*models.TrafficFilterRulesetInfo, error) { + return &models.TrafficFilterRulesetInfo{ + Associations: []*models.FilterAssociation{ + { + ID: &deploymentID, + EntityType: ec.String("deployment"), + }, + }, + }, nil + }, + createRule: func(params trafficfilterapi.CreateAssociationParams) error { + err := "CreateRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + deleteRule: func(params trafficfilterapi.DeleteAssociationParams) error { + assert.Equal(t, "rule2", params.ID) + return nil + }, + }, + + { + name: 
"should not delete rule when plan doesn't contain it and state does contain it but the association is already gone", + args: args{ + plan: []string{"rule1"}, + state: []string{"rule1", "rule2"}, + }, + getRule: func(trafficfilterapi.GetParams) (*models.TrafficFilterRulesetInfo, error) { + return &models.TrafficFilterRulesetInfo{}, nil + }, + createRule: func(params trafficfilterapi.CreateAssociationParams) error { + err := "CreateRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, + deleteRule: func(params trafficfilterapi.DeleteAssociationParams) error { + err := "DeleteRule function SHOULD NOT be called" + t.Errorf(err) + return fmt.Errorf(err) + }, }, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := hasDeploymentChange(tt.args.d) - assert.Equal(t, tt.want, got) + getRule := deploymentresource.GetAssociation + createRule := deploymentresource.CreateAssociation + deleteRule := deploymentresource.DeleteAssociation + + defer func() { + deploymentresource.GetAssociation = getRule + deploymentresource.CreateAssociation = createRule + deploymentresource.DeleteAssociation = deleteRule + }() + + deploymentresource.GetAssociation = tt.getRule + deploymentresource.CreateAssociation = tt.createRule + deploymentresource.DeleteAssociation = tt.deleteRule + + plan := v2.Deployment{ + Id: deploymentID, + TrafficFilter: tt.args.plan, + } + + state := v2.Deployment{ + Id: deploymentID, + TrafficFilter: tt.args.state, + } + + var planTF v2.DeploymentTF + + diags := tfsdk.ValueFrom(context.Background(), &plan, v2.DeploymentSchema().Type(), &planTF) + + assert.Nil(t, diags) + + var stateTF v2.DeploymentTF + + diags = tfsdk.ValueFrom(context.Background(), &state, v2.DeploymentSchema().Type(), &stateTF) + + assert.Nil(t, diags) + + diags = deploymentresource.HandleTrafficFilterChange(context.Background(), nil, planTF, stateTF) + + assert.Nil(t, diags) }) + } + } diff --git 
a/ec/ecresource/deploymentresource/update_traffic_rules.go b/ec/ecresource/deploymentresource/update_traffic_rules.go deleted file mode 100644 index 3bf66d60b..000000000 --- a/ec/ecresource/deploymentresource/update_traffic_rules.go +++ /dev/null @@ -1,114 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" -) - -func handleTrafficFilterChange(d *schema.ResourceData, client *api.API) error { - if !d.HasChange("traffic_filter") { - return nil - } - - var additions, deletions = getChange(d.GetChange("traffic_filter")) - for _, ruleID := range additions.List() { - if err := associateRule(ruleID.(string), d.Id(), client); err != nil { - return err - } - } - - for _, ruleID := range deletions.List() { - if err := removeRule(ruleID.(string), d.Id(), client); err != nil { - return err - } - } - - return nil -} - -func getChange(oldInterface, newInterface interface{}) (add, delete *schema.Set) { - var old, new *schema.Set - if s, ok := oldInterface.(*schema.Set); ok { - old = s - } - if s, ok := newInterface.(*schema.Set); ok { - new = s - } - - add = new.Difference(old) - delete = old.Difference(new) - - return add, delete -} - -func associateRule(ruleID, deploymentID string, client *api.API) error { - res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ - API: client, ID: ruleID, IncludeAssociations: true, - }) - if err != nil { - return err - } - - // When the rule has already been associated, return. - for _, assoc := range res.Associations { - if deploymentID == *assoc.ID { - return nil - } - } - - // Create assignment. - if err := trafficfilterapi.CreateAssociation(trafficfilterapi.CreateAssociationParams{ - API: client, ID: ruleID, EntityType: "deployment", EntityID: deploymentID, - }); err != nil { - return err - } - return nil -} - -func removeRule(ruleID, deploymentID string, client *api.API) error { - res, err := trafficfilterapi.Get(trafficfilterapi.GetParams{ - API: client, ID: ruleID, IncludeAssociations: true, - }) - - // If the rule is gone (403 or 404), return nil. 
- if err != nil { - if util.TrafficFilterNotFound(err) { - return nil - } - return err - } - - // If the rule is found, then delete the association. - for _, assoc := range res.Associations { - if deploymentID == *assoc.ID { - return trafficfilterapi.DeleteAssociation(trafficfilterapi.DeleteAssociationParams{ - API: client, - ID: ruleID, - EntityID: *assoc.ID, - EntityType: *assoc.EntityType, - }) - } - } - - return nil -} diff --git a/ec/ecresource/deploymentresource/update_traffic_rules_test.go b/ec/ecresource/deploymentresource/update_traffic_rules_test.go deleted file mode 100644 index 89a235e5b..000000000 --- a/ec/ecresource/deploymentresource/update_traffic_rules_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package deploymentresource - -import ( - "testing" - - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" -) - -func Test_getChange(t *testing.T) { - type args struct { - oldInterface interface{} - newInterface interface{} - } - tests := []struct { - name string - args args - wantAdditions []interface{} - wantDeletions []interface{} - }{ - { - name: "diffs totally different slices", - args: args{ - oldInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", - }), - newInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 3", "rule 4", - }), - }, - wantAdditions: []interface{}{"rule 4", "rule 3"}, - wantDeletions: []interface{}{"rule 1", "rule 2"}, - }, - { - name: "diffs equal slices", - args: args{ - oldInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", - }), - newInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", - }), - }, - wantAdditions: make([]interface{}, 0), - wantDeletions: make([]interface{}, 0), - }, - { - name: "diffs equal slightly slices", - args: args{ - oldInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", - }), - newInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", "rule 3", - }), - }, - wantAdditions: []interface{}{"rule 3"}, - wantDeletions: make([]interface{}, 0), - }, - { - name: "diffs a removal", - args: args{ - newInterface: schema.NewSet(schema.HashString, nil), - oldInterface: schema.NewSet(schema.HashString, []interface{}{ - "rule 1", "rule 2", - }), - }, - wantDeletions: []interface{}{"rule 1", "rule 2"}, - wantAdditions: make([]interface{}, 0), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotAdditions, gotDeletions := getChange(tt.args.oldInterface, tt.args.newInterface) - assert.Equal(t, tt.wantAdditions, gotAdditions.List(), "Additions") - assert.Equal(t, tt.wantDeletions, gotDeletions.List(), 
"Deletions") - }) - } -} diff --git a/ec/ecresource/trafficfilterassocresource/expanders.go b/ec/ecresource/deploymentresource/utils/definitions.go similarity index 62% rename from ec/ecresource/trafficfilterassocresource/expanders.go rename to ec/ecresource/deploymentresource/utils/definitions.go index 185c53635..29505cf2b 100644 --- a/ec/ecresource/trafficfilterassocresource/expanders.go +++ b/ec/ecresource/deploymentresource/utils/definitions.go @@ -15,19 +15,14 @@ // specific language governing permissions and limitations // under the License. -package trafficfilterassocresource +package utils -import ( - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/trafficfilterapi" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" -) +import "github.com/blang/semver" -const entityType = "deployment" +const ( + MinimumZoneCount = 1 +) -func expand(d *schema.ResourceData) trafficfilterapi.CreateAssociationParams { - return trafficfilterapi.CreateAssociationParams{ - ID: d.Get("traffic_filter_id").(string), - EntityID: d.Get("deployment_id").(string), - EntityType: entityType, - } -} +var ( + DataTiersVersion = semver.MustParse("7.10.0") +) diff --git a/ec/internal/util/changes.go b/ec/ecresource/deploymentresource/utils/missing_field_error.go similarity index 66% rename from ec/internal/util/changes.go rename to ec/ecresource/deploymentresource/utils/missing_field_error.go index 431dabaca..0c2d5338e 100644 --- a/ec/internal/util/changes.go +++ b/ec/ecresource/deploymentresource/utils/missing_field_error.go @@ -15,14 +15,10 @@ // specific language governing permissions and limitations // under the License. -package util +package utils -import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +import "fmt" -// ObjectRemoved takes in a ResourceData and a key string, returning whether -// or not the object ([]intreface{} type) is being removed in the current -// change. 
-func ObjectRemoved(d *schema.ResourceData, key string) bool { - old, new := d.GetChange(key) - return len(old.([]interface{})) > 0 && len(new.([]interface{})) == 0 +func MissingField(field string) error { + return fmt.Errorf("server response doesn't contain deployment '%s'", field) } diff --git a/ec/ecresource/elasticsearchkeystoreresource/create.go b/ec/ecresource/elasticsearchkeystoreresource/create.go index bade94c14..dcca8d4ce 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/create.go +++ b/ec/ecresource/elasticsearchkeystoreresource/create.go @@ -22,29 +22,54 @@ import ( "strconv" "strings" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" ) -// create will create an item in the Elasticsearch keystore -func create(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) - deploymentID := d.Get("deployment_id").(string) - settingName := d.Get("setting_name").(string) +// Create will create an item in the Elasticsearch keystore +func (r Resource) Create(ctx context.Context, request resource.CreateRequest, response *resource.CreateResponse) { + if !resourceReady(r, &response.Diagnostics) { + return + } + + var newState modelV0 + + diags := request.Plan.Get(ctx, &newState) + response.Diagnostics.Append(diags...) 
+ if response.Diagnostics.HasError() { + return + } if _, err := eskeystoreapi.Update(eskeystoreapi.UpdateParams{ - API: client, - DeploymentID: deploymentID, - Contents: expandModel(d), + API: r.client, + DeploymentID: newState.DeploymentID.Value, + Contents: expandModel(ctx, newState), }); err != nil { - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) + return + } + + newState.ID = types.String{Value: hashID(newState.DeploymentID.Value, newState.SettingName.Value)} + + found, diags := r.read(ctx, newState.DeploymentID.Value, &newState) + response.Diagnostics.Append(diags...) + if !found { + response.Diagnostics.AddError( + "Failed to read Elasticsearch keystore after create.", + "Failed to read Elasticsearch keystore after create.", + ) + response.State.RemoveResource(ctx) + return + } + if response.Diagnostics.HasError() { + return } - d.SetId(hashID(deploymentID, settingName)) - return read(ctx, d, meta) + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) 
} func hashID(elem ...string) string { diff --git a/ec/ecresource/elasticsearchkeystoreresource/delete.go b/ec/ecresource/elasticsearchkeystoreresource/delete.go index a4fcd145f..2edd3c146 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/delete.go +++ b/ec/ecresource/elasticsearchkeystoreresource/delete.go @@ -20,34 +20,37 @@ package elasticsearchkeystoreresource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" ) -// delete will delete an existing element in the Elasticsearch keystore -func delete(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - client := meta.(*api.API) - contents := expandModel(d) - settingName := d.Get("setting_name").(string) +// Delete will delete an existing element in the Elasticsearch keystore +func (r Resource) Delete(ctx context.Context, request resource.DeleteRequest, response *resource.DeleteResponse) { + + if !resourceReady(r, &response.Diagnostics) { + return + } + + var state modelV0 + + diags := request.State.Get(ctx, &state) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } // Since we're using the Update API (PATCH method), we need to se the Value // field to nil for the keystore setting to be unset. 
- if secret, ok := contents.Secrets[settingName]; ok { - secret.Value = nil - contents.Secrets[settingName] = secret - } + state.Value = types.String{Null: true} + contents := expandModel(ctx, state) if _, err := eskeystoreapi.Update(eskeystoreapi.UpdateParams{ - API: client, - DeploymentID: d.Get("deployment_id").(string), + API: r.client, + DeploymentID: state.DeploymentID.Value, Contents: contents, }); err != nil { - return diag.FromErr(err) + response.Diagnostics.AddError(err.Error(), err.Error()) } - - d.SetId("") - return read(ctx, d, meta) } diff --git a/ec/ecresource/elasticsearchkeystoreresource/expanders.go b/ec/ecresource/elasticsearchkeystoreresource/expanders.go index 9ed695f8b..009924796 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/expanders.go +++ b/ec/ecresource/elasticsearchkeystoreresource/expanders.go @@ -18,17 +18,16 @@ package elasticsearchkeystoreresource import ( + "context" "encoding/json" "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) -func expandModel(d *schema.ResourceData) *models.KeystoreContents { +func expandModel(ctx context.Context, state modelV0) *models.KeystoreContents { var value interface{} - secretName := d.Get("setting_name").(string) - strVal := d.Get("value").(string) + secretName := state.SettingName.Value + strVal := state.Value.Value // Tries to unmarshal the contents of the value into an `interface{}`, // if it fails, then the contents aren't a JSON object. 
@@ -39,7 +38,7 @@ func expandModel(d *schema.ResourceData) *models.KeystoreContents { return &models.KeystoreContents{ Secrets: map[string]models.KeystoreSecret{ secretName: { - AsFile: ec.Bool(d.Get("as_file").(bool)), + AsFile: &state.AsFile.Value, Value: value, }, }, diff --git a/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go b/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go index a1ebe37f8..6fd977465 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go +++ b/ec/ecresource/elasticsearchkeystoreresource/expanders_test.go @@ -18,18 +18,21 @@ package elasticsearchkeystoreresource import ( + "context" "testing" + "github.com/stretchr/testify/assert" + + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" "github.com/elastic/cloud-sdk-go/pkg/models" "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" ) func Test_expandModel(t *testing.T) { type args struct { - d *schema.ResourceData + state modelV0 } tests := []struct { name string @@ -38,14 +41,14 @@ func Test_expandModel(t *testing.T) { }{ { name: "parses the resource with a string value", - args: args{d: newResourceData(t, resDataParams{ - ID: "some-random-id", - Resources: map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "setting_name": "my_secret", - "value": "supersecret", - }, - })}, + args: args{state: modelV0{ + + ID: types.String{Value: "some-random-id"}, + DeploymentID: types.String{Value: mock.ValidClusterID}, + SettingName: types.String{Value: "my_secret"}, + Value: types.String{Value: "supersecret"}, + AsFile: types.Bool{Value: false}, + }}, want: &models.KeystoreContents{ Secrets: map[string]models.KeystoreSecret{ "my_secret": { @@ -57,12 +60,12 @@ func Test_expandModel(t *testing.T) { }, { name: "parses the resource with a json formatted value", - args: args{d: newResourceData(t, 
resDataParams{ - ID: "some-random-id", - Resources: map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "setting_name": "my_secret", - "value": `{ + args: args{state: modelV0{ + + ID: types.String{Value: "some-random-id"}, + DeploymentID: types.String{Value: mock.ValidClusterID}, + SettingName: types.String{Value: "my_secret"}, + Value: types.String{Value: `{ "type": "service_account", "project_id": "project-id", "private_key_id": "key-id", @@ -73,10 +76,10 @@ func Test_expandModel(t *testing.T) { "token_uri": "https://accounts.google.com/o/oauth2/token", "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/service-account-email" -}`, - "as_file": true, - }, - })}, +}`}, + AsFile: types.Bool{Value: true}, + }, + }, want: &models.KeystoreContents{ Secrets: map[string]models.KeystoreSecret{ "my_secret": { @@ -100,7 +103,7 @@ func Test_expandModel(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got := expandModel(tt.args.d) + got := expandModel(context.Background(), tt.args.state) assert.Equal(t, tt.want, got) }) } diff --git a/ec/ecresource/elasticsearchkeystoreresource/read.go b/ec/ecresource/elasticsearchkeystoreresource/read.go index beec83b44..9968c7fd1 100644 --- a/ec/ecresource/elasticsearchkeystoreresource/read.go +++ b/ec/ecresource/elasticsearchkeystoreresource/read.go @@ -20,51 +20,68 @@ package elasticsearchkeystoreresource import ( "context" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/eskeystoreapi" + 
"github.com/elastic/cloud-sdk-go/pkg/models" ) -// read queries the remote Elasticsearch keystore state and updates the local state. -func read(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics { - var client = meta.(*api.API) - deploymentID := d.Get("deployment_id").(string) +// Read queries the remote Elasticsearch keystore state and updates the local state. +func (r Resource) Read(ctx context.Context, request resource.ReadRequest, response *resource.ReadResponse) { + if !resourceReady(r, &response.Diagnostics) { + return + } + + var newState modelV0 + diags := request.State.Get(ctx, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + + found, diags := r.read(ctx, newState.DeploymentID.Value, &newState) + response.Diagnostics.Append(diags...) + if response.Diagnostics.HasError() { + return + } + if !found { + response.State.RemoveResource(ctx) + return + } + + // Finally, set the state + response.Diagnostics.Append(response.State.Set(ctx, newState)...) +} + +func (r Resource) read(ctx context.Context, deploymentID string, state *modelV0) (found bool, diags diag.Diagnostics) { res, err := eskeystoreapi.Get(eskeystoreapi.GetParams{ - API: client, + API: r.client, DeploymentID: deploymentID, }) if err != nil { - return diag.FromErr(err) - } - - if err := modelToState(d, res); err != nil { - return diag.FromErr(err) + diags.AddError(err.Error(), err.Error()) + return true, diags } - return nil + return modelToState(ctx, res, state) } -// This modelToState function is a little different than others in that it does +// This modelToState function is a little different from others in that it does // not set any other fields than "as_file". 
This is because the "value" is not -// returned by the API for obvious reasons and thus we cannot reconcile that the +// returned by the API for obvious reasons, and thus we cannot reconcile that the // value of the secret is the same in the remote as it is in the configuration. -func modelToState(d *schema.ResourceData, res *models.KeystoreContents) error { - if secret, ok := res.Secrets[d.Get("setting_name").(string)]; ok { +func modelToState(ctx context.Context, res *models.KeystoreContents, state *modelV0) (found bool, diags diag.Diagnostics) { + if secret, ok := res.Secrets[state.SettingName.Value]; ok { if secret.AsFile != nil { - if err := d.Set("as_file", *secret.AsFile); err != nil { - return err - } + state.AsFile = types.Bool{Value: *secret.AsFile} } - return nil + return true, nil } - // When the secret is not found in the returned map of secrets, set the id - // to an empty string so that the resource is marked as destroyed. Would - // only happen if secrets are removed from the underlying Deployment. - d.SetId("") - return nil + // When the secret is not found in the returned map of secrets, the resource should be removed from state. + // Would only happen if secrets are removed from the underlying Deployment. + return false, nil } diff --git a/ec/ecresource/elasticsearchkeystoreresource/read_test.go b/ec/ecresource/elasticsearchkeystoreresource/read_test.go deleted file mode 100644 index e8b568d9e..000000000 --- a/ec/ecresource/elasticsearchkeystoreresource/read_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. - -package elasticsearchkeystoreresource - -import ( - "testing" - - "github.com/elastic/cloud-sdk-go/pkg/api/mock" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" -) - -func Test_modelToState(t *testing.T) { - esKeystoreSchemaArg := schema.TestResourceDataRaw(t, newSchema(), map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "setting_name": "my_secret", - "value": "supersecret", - "as_file": false, // This field is overridden. 
- }) - esKeystoreSchemaArg.SetId(mock.ValidClusterID) - - esKeystoreSchemaArgMissing := schema.TestResourceDataRaw(t, newSchema(), map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "setting_name": "my_secret", - "value": "supersecret", - "as_file": false, - }) - esKeystoreSchemaArgMissing.SetId(mock.ValidClusterID) - - type args struct { - d *schema.ResourceData - res *models.KeystoreContents - } - tests := []struct { - name string - args args - want *schema.ResourceData - err error - }{ - { - name: "flattens the keystore secret (not really since the value is not returned)", - args: args{ - d: esKeystoreSchemaArg, - res: &models.KeystoreContents{ - Secrets: map[string]models.KeystoreSecret{ - "my_secret": { - AsFile: ec.Bool(true), - }, - "some_other_secret": { - AsFile: ec.Bool(false), - }, - }, - }, - }, - want: newResourceData(t, resDataParams{ - ID: mock.ValidClusterID, - Resources: map[string]interface{}{ - "deployment_id": mock.ValidClusterID, - "setting_name": "my_secret", - "value": "supersecret", - "as_file": true, - }, - }), - }, - { - name: "unsets the ID when our secret is not in the returned list of secrets", - args: args{ - d: esKeystoreSchemaArgMissing, - res: &models.KeystoreContents{ - Secrets: map[string]models.KeystoreSecret{ - "my_other_secret": { - AsFile: ec.Bool(true), - }, - "some_other_secret": { - AsFile: ec.Bool(false), - }, - }, - }, - }, - want: newResourceData(t, resDataParams{ - Resources: map[string]interface{}{}, - }), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := modelToState(tt.args.d, tt.args.res) - if tt.err != nil || err != nil { - assert.EqualError(t, err, tt.err.Error()) - } else { - assert.NoError(t, err) - } - - wantState := tt.want.State() - gotState := tt.args.d.State() - - if wantState != nil && gotState != nil { - assert.Equal(t, wantState.Attributes, gotState.Attributes) - return - } - }) - } -} diff --git 
a/ec/ecresource/elasticsearchkeystoreresource/resource_test.go b/ec/ecresource/elasticsearchkeystoreresource/resource_test.go new file mode 100644 index 000000000..4100b64b4 --- /dev/null +++ b/ec/ecresource/elasticsearchkeystoreresource/resource_test.go @@ -0,0 +1,302 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package elasticsearchkeystoreresource_test + +import ( + "net/url" + "regexp" + "testing" + + "github.com/hashicorp/terraform-plugin-framework/providerserver" + "github.com/hashicorp/terraform-plugin-go/tfprotov6" + r "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + + "github.com/elastic/cloud-sdk-go/pkg/api" + "github.com/elastic/cloud-sdk-go/pkg/api/mock" + + "github.com/elastic/terraform-provider-ec/ec" +) + +func TestResourceElasticsearchKeyStore(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + readDeployment(), + createResponse(), + readDeployment(), + readResponse(), + readDeployment(), + readResponse(), + readDeployment(), + readResponse(), + readDeployment(), + updateResponse(), + readDeployment(), + readResponse(), + readDeployment(), + readResponse(), + readDeployment(), + readResponse(), + readDeployment(), + deleteResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: externalKeystore1, + Check: checkResource1(), + }, + { // Update resource + Config: externalKeystore2, + Check: checkResource2(), + }, + { // Delete resource + Destroy: true, + Config: externalKeystore1, + }, + }, + }) +} + +func TestResourceElasticsearchKeyStore_failedCreate(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + mock.New500Response(mock.SampleInternalError().Response.Body), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: externalKeystore1, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceElasticsearchKeyStore_failedReadAfterCreate(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + readDeployment(), + createResponse(), + mock.New500Response(mock.SampleInternalError().Response.Body), + ), + ), + 
Steps: []r.TestStep{ + { // Create resource + Config: externalKeystore1, + ExpectError: regexp.MustCompile(`internal.server.error: There was an internal server error`), + }, + }, + }) +} + +func TestResourceElasticsearchKeyStore_notFoundAfterCreate_and_gracefulDeletion(t *testing.T) { + + r.UnitTest(t, r.TestCase{ + ProtoV6ProviderFactories: protoV6ProviderFactoriesWithMockClient( + api.NewMock( + readDeployment(), + createResponse(), + readDeployment(), + emptyReadResponse(), + readDeployment(), + emptyReadResponse(), + ), + ), + Steps: []r.TestStep{ + { // Create resource + Config: externalKeystore1, + Check: checkResource1(), + ExpectError: regexp.MustCompile(`Failed to read Elasticsearch keystore after create.`), + }, + }, + }) +} + +const externalKeystore1 = ` +resource "ec_deployment_elasticsearch_keystore" "test" { + deployment_id = "0a592ab2c5baf0fa95c77ac62135782e" + setting_name = "xpack.notification.slack.account.hello.secure_url" + value = "hella" +} +` + +const externalKeystore2 = ` +resource "ec_deployment_elasticsearch_keystore" "test" { + deployment_id = "0a592ab2c5baf0fa95c77ac62135782e" + setting_name = "xpack.notification.slack.account.hello.secure_url" + value = <`), + }, + { + name: "has size but no size_resource", + args: args{ + size: ec.String("15g"), + }, + want: &models.TopologySize{ + Value: ec.Int32(15360), + Resource: ec.String("memory"), + }, + }, + { + name: "has size and explicit size_resource (memory)", + args: args{ + size: ec.String("8g"), + resource: ec.String("memory"), + }, + want: &models.TopologySize{ + Value: ec.Int32(8192), + Resource: ec.String("memory"), + }, + }, + { + name: "has size and explicit size_resource (storage)", + args: args{ + size: ec.String("4g"), + resource: ec.String("storage"), + }, + want: &models.TopologySize{ + Value: ec.Int32(4096), + Resource: ec.String("storage"), + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseTopologySize(tt.args.size, 
tt.args.resource) + assert.Equal(t, tt.err, err) + assert.Equal(t, tt.want, got) + }) + } +} diff --git a/ec/internal/planmodifier/default_value.go b/ec/internal/planmodifier/default_value.go new file mode 100644 index 000000000..815ded36a --- /dev/null +++ b/ec/internal/planmodifier/default_value.go @@ -0,0 +1,64 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +// NOTE! copied from terraform-provider-tls +package planmodifier + +import ( + "context" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +// defaultValueAttributePlanModifier specifies a default value (attr.Value) for an attribute. +type defaultValueAttributePlanModifier struct { + DefaultValue attr.Value +} + +// DefaultValue is a helper to instantiate a defaultValueAttributePlanModifier. 
+func DefaultValue(v attr.Value) tfsdk.AttributePlanModifier { + return &defaultValueAttributePlanModifier{v} +} + +var _ tfsdk.AttributePlanModifier = (*defaultValueAttributePlanModifier)(nil) + +func (m *defaultValueAttributePlanModifier) Description(ctx context.Context) string { + return m.MarkdownDescription(ctx) +} + +func (m *defaultValueAttributePlanModifier) MarkdownDescription(ctx context.Context) string { + return fmt.Sprintf("Sets the default value %q (%s) if the attribute is not set", m.DefaultValue, m.DefaultValue.Type(ctx)) +} + +func (m *defaultValueAttributePlanModifier) Modify(_ context.Context, req tfsdk.ModifyAttributePlanRequest, resp *tfsdk.ModifyAttributePlanResponse) { + if resp.AttributePlan == nil || req.AttributeConfig == nil { + return + } + + if !req.AttributeConfig.IsNull() { + return + } + + // if the config is the unknown value, use the unknown value otherwise, interpolation gets messed up + if req.AttributeConfig.IsUnknown() { + return + } + + resp.AttributePlan = m.DefaultValue +} diff --git a/ec/ecresource/trafficfilterassocresource/flatteners.go b/ec/internal/provider.go similarity index 56% rename from ec/ecresource/trafficfilterassocresource/flatteners.go rename to ec/internal/provider.go index 5c419f4c8..1345c510e 100644 --- a/ec/ecresource/trafficfilterassocresource/flatteners.go +++ b/ec/internal/provider.go @@ -15,35 +15,32 @@ // specific language governing permissions and limitations // under the License. 
-package trafficfilterassocresource +package internal import ( - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" + "fmt" + + "github.com/hashicorp/terraform-plugin-framework/diag" + + "github.com/elastic/cloud-sdk-go/pkg/api" ) -func flatten(res *models.TrafficFilterRulesetInfo, d *schema.ResourceData) error { - if res == nil { - return nil - } +// ConvertProviderData is a helper function for DataSource.Configure and Resource.Configure implementations +func ConvertProviderData(providerData any) (*api.API, diag.Diagnostics) { + var diags diag.Diagnostics - var found bool - deploymentID := d.Get("deployment_id").(string) - for _, assoc := range res.Associations { - if *assoc.EntityType == entityType && *assoc.ID == deploymentID { - found = true - } + if providerData == nil { + return nil, diags } - if !found { - if err := d.Set("deployment_id", ""); err != nil { - return err - } - if err := d.Set("traffic_filter_id", ""); err != nil { - return err - } - d.SetId("") - } + client, ok := providerData.(*api.API) + if !ok { + diags.AddError( + "Unexpected Provider Data", + fmt.Sprintf("Expected *api.API, got: %T. 
Please report this issue to the provider developers.", providerData), + ) - return nil + return nil, diags + } + return client, diags } diff --git a/ec/internal/util/helpers.go b/ec/internal/util/helpers.go index 98625cee5..d0f892ea8 100644 --- a/ec/internal/util/helpers.go +++ b/ec/internal/util/helpers.go @@ -18,30 +18,17 @@ package util import ( - "fmt" + "os" + "strconv" + + "github.com/hashicorp/terraform-plugin-framework/attr" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/elastic/cloud-sdk-go/pkg/models" ) -// FlattenClusterEndpoint receives a ClusterMetadataInfo, parses the http and -// https endpoints and returns a map with two keys: `http_endpoint` and -// `https_endpoint` -func FlattenClusterEndpoint(metadata *models.ClusterMetadataInfo) map[string]interface{} { - if metadata == nil || metadata.Endpoint == "" || metadata.Ports == nil { - return nil - } - - var m = make(map[string]interface{}) - if metadata.Ports.HTTP != nil { - m["http_endpoint"] = fmt.Sprintf("http://%s:%d", metadata.Endpoint, *metadata.Ports.HTTP) - } - - if metadata.Ports.HTTPS != nil { - m["https_endpoint"] = fmt.Sprintf("https://%s:%d", metadata.Endpoint, *metadata.Ports.HTTPS) - } - - return m -} +// used in tests +var GetEnv = os.Getenv // IsCurrentEsPlanEmpty checks that the elasticsearch resource current plan is empty. func IsCurrentEsPlanEmpty(res *models.ElasticsearchResourceInfo) bool { @@ -73,3 +60,46 @@ func IsCurrentEssPlanEmpty(res *models.EnterpriseSearchResourceInfo) bool { var emptyPlanInfo = res.Info == nil || res.Info.PlanInfo == nil || res.Info.PlanInfo.Current == nil return emptyPlanInfo || res.Info.PlanInfo.Current.Plan == nil } + +// MultiGetenvOrDefault returns the value of the first environment variable in the +// given list that has a non-empty value. If none of the environment +// variables have a value, the default value is returned. 
+func MultiGetenvOrDefault(keys []string, defaultValue string) string { + for _, key := range keys { + if value := GetEnv(key); value != "" { + return value + } + } + return defaultValue +} + +func StringToBool(str string) (bool, error) { + if str == "" { + return false, nil + } + + v, err := strconv.ParseBool(str) + if err != nil { + return false, err + } + + return v, nil +} + +func StringListAsType(in []string) types.List { + //goland:noinspection GoPreferNilSlice + out := []attr.Value{} + for _, value := range in { + out = append(out, types.String{Value: value}) + } + return types.List{ElemType: types.StringType, Elems: out} +} + +func StringMapAsType(in map[string]string) types.Map { + //goland:noinspection GoPreferNilSlice + out := make(map[string]attr.Value, len(in)) + for key, value := range in { + out[key] = types.String{Value: value} + } + return types.Map{ElemType: types.StringType, Elems: out} +} diff --git a/ec/internal/util/parsers.go b/ec/internal/util/parsers.go index 4c512c335..dd1b174c1 100644 --- a/ec/internal/util/parsers.go +++ b/ec/internal/util/parsers.go @@ -19,14 +19,8 @@ package util import ( "fmt" - - "github.com/elastic/cloud-sdk-go/pkg/api/deploymentapi/deploymentsize" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" ) -const defaultSizeResource = "memory" - // MemoryToState parses a megabyte int notation to a gigabyte notation. func MemoryToState(mem int32) string { if mem%1024 > 1 && mem%512 == 0 { @@ -34,25 +28,3 @@ func MemoryToState(mem int32) string { } return fmt.Sprintf("%dg", mem/1024) } - -// ParseTopologySize parses a flattened topology into its model. 
-func ParseTopologySize(topology map[string]interface{}) (*models.TopologySize, error) { - if mem, ok := topology["size"].(string); ok && mem != "" { - val, err := deploymentsize.ParseGb(mem) - if err != nil { - return nil, err - } - - var sizeResource = defaultSizeResource - if sr, ok := topology["size_resource"].(string); ok { - sizeResource = sr - } - - return &models.TopologySize{ - Value: ec.Int32(val), - Resource: ec.String(sizeResource), - }, nil - } - - return nil, nil -} diff --git a/ec/internal/util/parsers_test.go b/ec/internal/util/parsers_test.go index 317b50755..958490b9c 100644 --- a/ec/internal/util/parsers_test.go +++ b/ec/internal/util/parsers_test.go @@ -18,11 +18,8 @@ package util import ( - "errors" "testing" - "github.com/elastic/cloud-sdk-go/pkg/models" - "github.com/elastic/cloud-sdk-go/pkg/util/ec" "github.com/stretchr/testify/assert" ) @@ -53,71 +50,3 @@ func TestMemoryToState(t *testing.T) { }) } } - -func TestParseTopologySize(t *testing.T) { - type args struct { - topology map[string]interface{} - } - tests := []struct { - name string - args args - want *models.TopologySize - err error - }{ - { - name: "has no size returns nil", - }, - { - name: "has empty size returns nil", - args: args{topology: map[string]interface{}{ - "size": "", - }}, - }, - { - name: "has badly formatted size returns error", - args: args{topology: map[string]interface{}{ - "size": "asdasd", - }}, - err: errors.New(`failed to convert "asdasd" to `), - }, - { - name: "has size but no size_resource", - args: args{topology: map[string]interface{}{ - "size": "15g", - }}, - want: &models.TopologySize{ - Value: ec.Int32(15360), - Resource: ec.String("memory"), - }, - }, - { - name: "has size and explicit size_resource (memory)", - args: args{topology: map[string]interface{}{ - "size": "8g", - "size_resource": "memory", - }}, - want: &models.TopologySize{ - Value: ec.Int32(8192), - Resource: ec.String("memory"), - }, - }, - { - name: "has size and explicit size_resource 
(storage)", - args: args{topology: map[string]interface{}{ - "size": "4g", - "size_resource": "storage", - }}, - want: &models.TopologySize{ - Value: ec.Int32(4096), - Resource: ec.String("storage"), - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := ParseTopologySize(tt.args.topology) - assert.Equal(t, tt.err, err) - assert.Equal(t, tt.want, got) - }) - } -} diff --git a/ec/internal/util/set_util.go b/ec/internal/util/resource_kind.go similarity index 60% rename from ec/internal/util/set_util.go rename to ec/internal/util/resource_kind.go index 0c0caa9cf..7658dcd3a 100644 --- a/ec/internal/util/set_util.go +++ b/ec/internal/util/resource_kind.go @@ -17,23 +17,29 @@ package util -import "sort" +type ResourceKind int -// StringToItems takes in a slice of strings and returns a []interface{}. -func StringToItems(elems ...string) (result []interface{}) { - for _, e := range elems { - result = append(result, e) - } - - return result -} +const ( + ApmResourceKind ResourceKind = iota + ElasticsearchResourceKind + EnterpriseSearchResourceKind + IntegrationsServerResourceKind + KibanaResourceKind +) -// ItemsToString takes in an []interface{} and returns a slice of strings. 
-func ItemsToString(elems []interface{}) (result []string) { - for _, e := range elems { - result = append(result, e.(string)) +func (rk ResourceKind) Name() string { + switch rk { + case ApmResourceKind: + return "APM" + case ElasticsearchResourceKind: + return "Elasticsearch" + case EnterpriseSearchResourceKind: + return "Enterprise Search" + case IntegrationsServerResourceKind: + return "Integrations Server" + case KibanaResourceKind: + return "Kibana" + default: + return "unknown" } - sort.Strings(result) - - return result } diff --git a/ec/internal/util/set_util_test.go b/ec/internal/util/set_util_test.go deleted file mode 100644 index 3c85e3b9a..000000000 --- a/ec/internal/util/set_util_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Licensed to Elasticsearch B.V. under one or more contributor -// license agreements. See the NOTICE file distributed with -// this work for additional information regarding copyright -// ownership. Elasticsearch B.V. licenses this file to you under -// the Apache License, Version 2.0 (the "License"); you may -// not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, -// software distributed under the License is distributed on an -// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -// KIND, either express or implied. See the License for the -// specific language governing permissions and limitations -// under the License. 
- -package util - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestStringItems(t *testing.T) { - type args struct { - elems []string - } - tests := []struct { - name string - args args - wantResult []interface{} - }{ - { - name: "empty list returns nil", - }, - { - name: "populated list returns the results as []interface{}", - args: args{elems: []string{"some", "some-other", ""}}, - wantResult: []interface{}{"some", "some-other", ""}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotResult := StringToItems(tt.args.elems...) - assert.Equal(t, tt.wantResult, gotResult) - }) - } -} - -func TestItemsToString(t *testing.T) { - type args struct { - elems []interface{} - } - tests := []struct { - name string - args args - wantResult []string - }{ - { - name: "empty list returns nil", - }, - { - name: "populated list returns the results as []string{}", - args: args{elems: []interface{}{"some", "some-other", ""}}, - wantResult: []string{"", "some", "some-other"}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - gotResult := ItemsToString(tt.args.elems) - assert.Equal(t, tt.wantResult, gotResult) - }) - } -} diff --git a/ec/internal/util/testutils.go b/ec/internal/util/testutils.go index b55318c56..287104f6f 100644 --- a/ec/internal/util/testutils.go +++ b/ec/internal/util/testutils.go @@ -20,11 +20,17 @@ package util import ( "context" "errors" + "fmt" "testing" - "github.com/elastic/cloud-sdk-go/pkg/multierror" + "github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" + "github.com/stretchr/testify/assert" + + "github.com/elastic/cloud-sdk-go/pkg/multierror" ) // ResDataParams holds the raw configuration for NewResourceData to consume @@ -106,3 
+112,15 @@ func generateRD(t *testing.T, schemaMap map[string]*schema.Schema, rawAttr map[s return result } + +// Check conversion to attr.Value +// it should catch cases when e.g. the func under test returns types.List{} +func CheckConverionToAttrValue(t *testing.T, dt datasource.DataSource, attributeName string, attributeValue types.List) { + schema, diags := dt.GetSchema(context.Background()) + assert.Nil(t, diags) + attrType := schema.Attributes[attributeName].FrameworkType() + assert.NotNil(t, attrType, fmt.Sprintf("Type of attribute '%s' cannot be nil", attributeName)) + var target types.List + diags = tfsdk.ValueFrom(context.Background(), attributeValue, attrType, &target) + assert.Nil(t, diags) +} diff --git a/ec/internal/util/traffic_filter_err_test.go b/ec/internal/util/traffic_filter_err_test.go index 5da4e4f5a..5ac31a5b4 100644 --- a/ec/internal/util/traffic_filter_err_test.go +++ b/ec/internal/util/traffic_filter_err_test.go @@ -20,9 +20,10 @@ package util import ( "testing" + "github.com/go-openapi/runtime" + "github.com/elastic/cloud-sdk-go/pkg/api/apierror" "github.com/elastic/cloud-sdk-go/pkg/client/deployments_traffic_filter" - "github.com/go-openapi/runtime" ) func TestTrafficFilterNotFound(t *testing.T) { diff --git a/ec/internal/validators/knownvalidator.go b/ec/internal/validators/knownvalidator.go new file mode 100644 index 000000000..8ef7bf0c0 --- /dev/null +++ b/ec/internal/validators/knownvalidator.go @@ -0,0 +1,58 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package validators + +import ( + "context" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" +) + +type knownValidator struct{} + +// Description returns a plain text description of the validator's behavior, suitable for a practitioner to understand its impact. +func (v knownValidator) Description(ctx context.Context) string { + return "Value must be known" +} + +// MarkdownDescription returns a markdown formatted description of the validator's behavior, suitable for a practitioner to understand its impact. +func (v knownValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// Validate runs the main validation logic of the validator, reading configuration data out of `req` and updating `resp` with diagnostics. +func (v knownValidator) Validate(ctx context.Context, req tfsdk.ValidateAttributeRequest, resp *tfsdk.ValidateAttributeResponse) { + if req.AttributeConfig.IsUnknown() { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + "Value must be known", + ) + return + } +} + +// Known returns an AttributeValidator which ensures that any configured +// attribute value: +// +// - Is known. +// +// Null (unconfigured) values are skipped. +func Known() tfsdk.AttributeValidator { + return knownValidator{} +} diff --git a/ec/internal/validators/urlvalidator.go b/ec/internal/validators/urlvalidator.go new file mode 100644 index 000000000..981f0530e --- /dev/null +++ b/ec/internal/validators/urlvalidator.go @@ -0,0 +1,101 @@ +// Licensed to Elasticsearch B.V. 
under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package validators + +import ( + "context" + "fmt" + "net/url" + "strings" + + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "golang.org/x/exp/slices" +) + +type isURLWithSchemeValidator struct { + ValidSchemes []string +} + +// Description returns a plain text description of the validator's behavior, suitable for a practitioner to understand its impact. +func (v isURLWithSchemeValidator) Description(ctx context.Context) string { + return fmt.Sprintf("Value must be a valid URL with scheme (%s)", strings.Join(v.ValidSchemes, ", ")) +} + +// MarkdownDescription returns a markdown formatted description of the validator's behavior, suitable for a practitioner to understand its impact. +func (v isURLWithSchemeValidator) MarkdownDescription(ctx context.Context) string { + return v.Description(ctx) +} + +// Validate runs the main validation logic of the validator, reading configuration data out of `req` and updating `resp` with diagnostics. 
+func (v isURLWithSchemeValidator) Validate(ctx context.Context, req tfsdk.ValidateAttributeRequest, resp *tfsdk.ValidateAttributeResponse) { + // types.String must be the attr.Value produced by the attr.Type in the schema for this attribute + // for generic validators, use + // https://pkg.go.dev/github.com/hashicorp/terraform-plugin-framework/tfsdk#ConvertValue + // to convert into a known type. + var str types.String + diags := tfsdk.ValueAs(ctx, req.AttributeConfig, &str) + resp.Diagnostics.Append(diags...) + if diags.HasError() { + return + } + + if str.Unknown || str.Null { + return + } + + if str.Value == "" { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + fmt.Sprintf("URL must not be empty, got %v.", str), + ) + return + } + + u, err := url.Parse(str.Value) + if err != nil { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + fmt.Sprintf("URL is invalid, got %v: %+v", str.Value, err), + ) + return + } + + if u.Host == "" { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + fmt.Sprintf("URL is missing host, got %v", str.Value), + ) + return + } + + if !slices.Contains(v.ValidSchemes, u.Scheme) { + resp.Diagnostics.AddAttributeError( + req.AttributePath, + v.Description(ctx), + fmt.Sprintf("URL is expected to have a valid scheme (one of '%v'), got %v (%v)", v.ValidSchemes, u.Scheme, str.Value), + ) + } +} + +func IsURLWithSchemeValidator(validSchemes []string) tfsdk.AttributeValidator { + return isURLWithSchemeValidator{ValidSchemes: validSchemes} +} diff --git a/ec/provider.go b/ec/provider.go index 39c1aecb5..c09717488 100644 --- a/ec/provider.go +++ b/ec/provider.go @@ -18,22 +18,29 @@ package ec import ( + "context" "fmt" "time" - "github.com/elastic/cloud-sdk-go/pkg/api" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation" + 
"github.com/hashicorp/terraform-plugin-framework/datasource" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/resource" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/deploymentsdatasource" - "github.com/elastic/terraform-provider-ec/ec/ecdatasource/privatelinkdatasource" "github.com/elastic/terraform-provider-ec/ec/ecdatasource/stackdatasource" "github.com/elastic/terraform-provider-ec/ec/ecresource/deploymentresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/elasticsearchkeystoreresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/extensionresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/trafficfilterassocresource" "github.com/elastic/terraform-provider-ec/ec/ecresource/trafficfilterresource" + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/elastic/terraform-provider-ec/ec/internal/validators" ) const ( @@ -60,108 +67,269 @@ var ( defaultTimeout = 40 * time.Second ) -// Provider returns a schema.Provider. 
-func Provider() *schema.Provider { - return &schema.Provider{ - ConfigureContextFunc: configureAPI, - Schema: newSchema(), - DataSourcesMap: map[string]*schema.Resource{ - "ec_deployment": deploymentdatasource.DataSource(), - "ec_deployments": deploymentsdatasource.DataSource(), - "ec_stack": stackdatasource.DataSource(), - "ec_aws_privatelink_endpoint": privatelinkdatasource.AwsDataSource(), - "ec_azure_privatelink_endpoint": privatelinkdatasource.AzureDataSource(), - "ec_gcp_private_service_connect_endpoint": privatelinkdatasource.GcpDataSource(), - }, - ResourcesMap: map[string]*schema.Resource{ - "ec_deployment": deploymentresource.Resource(), - "ec_deployment_elasticsearch_keystore": elasticsearchkeystoreresource.Resource(), - "ec_deployment_traffic_filter": trafficfilterresource.Resource(), - "ec_deployment_traffic_filter_association": trafficfilterassocresource.Resource(), - "ec_deployment_extension": extensionresource.Resource(), - }, +func New(version string) provider.Provider { + return &Provider{version: version} +} + +func ProviderWithClient(client *api.API, version string) provider.Provider { + return &Provider{client: client, version: version} +} + +var _ provider.Provider = (*Provider)(nil) +var _ provider.ProviderWithMetadata = (*Provider)(nil) + +type Provider struct { + version string + client *api.API +} + +func (p *Provider) Metadata(ctx context.Context, request provider.MetadataRequest, response *provider.MetadataResponse) { + response.TypeName = "ec" +} + +func (p *Provider) DataSources(ctx context.Context) []func() datasource.DataSource { + return []func() datasource.DataSource{ + func() datasource.DataSource { return &deploymentdatasource.DataSource{} }, + func() datasource.DataSource { return &deploymentsdatasource.DataSource{} }, + func() datasource.DataSource { return &stackdatasource.DataSource{} }, } } -func newSchema() map[string]*schema.Schema { - return map[string]*schema.Schema{ - "endpoint": { - Description: 
fmt.Sprintf(endpointDesc, api.ESSEndpoint), - Type: schema.TypeString, - Optional: true, - ValidateFunc: validation.IsURLWithScheme(validURLSchemes), - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_ENDPOINT", "EC_HOST"}, - api.ESSEndpoint, - ), - }, - "apikey": { - Description: apikeyDesc, - Type: schema.TypeString, - Optional: true, - Sensitive: true, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_API_KEY"}, "", - ), - }, - "username": { - Description: usernameDesc, - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_USER", "EC_USERNAME"}, "", - ), - }, - "password": { - Description: passwordDesc, - Type: schema.TypeString, - Optional: true, - Sensitive: true, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_PASS", "EC_PASSWORD"}, "", - ), - }, - "insecure": { - Description: insecureDesc, - Type: schema.TypeBool, - Optional: true, - Default: false, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_INSECURE", "EC_SKIP_TLS_VALIDATION"}, - false, - ), - }, - "timeout": { - Description: timeoutDesc, - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_TIMEOUT"}, defaultTimeout.String(), - ), - }, - "verbose": { - Description: verboseDesc, - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.MultiEnvDefaultFunc( - []string{"EC_VERBOSE"}, false, - ), - }, - "verbose_credentials": { - Description: verboseCredsDesc, - Type: schema.TypeBool, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc( - "EC_VERBOSE_CREDENTIALS", false, - ), - }, - "verbose_file": { - Description: timeoutDesc, - Type: schema.TypeString, - Optional: true, - DefaultFunc: schema.EnvDefaultFunc( - "EC_VERBOSE_FILE", "request.log", - ), +func (p *Provider) Resources(ctx context.Context) []func() resource.Resource { + return []func() resource.Resource{ + func() resource.Resource { return &elasticsearchkeystoreresource.Resource{} }, + func() 
resource.Resource { return &extensionresource.Resource{} }, + func() resource.Resource { return &deploymentresource.Resource{} }, + func() resource.Resource { return &trafficfilterresource.Resource{} }, + func() resource.Resource { return &trafficfilterassocresource.Resource{} }, + } +} + +func (p *Provider) GetSchema(context.Context) (tfsdk.Schema, diag.Diagnostics) { + var diags diag.Diagnostics + + return tfsdk.Schema{ + Attributes: map[string]tfsdk.Attribute{ + "endpoint": { + Description: fmt.Sprintf(endpointDesc, api.ESSEndpoint), + Type: types.StringType, + Optional: true, + Validators: []tfsdk.AttributeValidator{validators.Known(), validators.IsURLWithSchemeValidator(validURLSchemes)}, + }, + "apikey": { + Description: apikeyDesc, + Type: types.StringType, + Optional: true, + Sensitive: true, + }, + "username": { + Description: usernameDesc, + Type: types.StringType, + Optional: true, + }, + "password": { + Description: passwordDesc, + Type: types.StringType, + Optional: true, + Sensitive: true, + }, + "insecure": { + Description: insecureDesc, + Type: types.BoolType, + Optional: true, + }, + "timeout": { + Description: timeoutDesc, + Type: types.StringType, + Optional: true, + }, + "verbose": { + Description: verboseDesc, + Type: types.BoolType, + Optional: true, + }, + "verbose_credentials": { + Description: verboseCredsDesc, + Type: types.BoolType, + Optional: true, + }, + "verbose_file": { + Description: timeoutDesc, + Type: types.StringType, + Optional: true, + }, }, + }, diags +} + +// Retrieve provider data from configuration +type providerConfig struct { + Endpoint types.String `tfsdk:"endpoint"` + ApiKey types.String `tfsdk:"apikey"` + Username types.String `tfsdk:"username"` + Password types.String `tfsdk:"password"` + Insecure types.Bool `tfsdk:"insecure"` + Timeout types.String `tfsdk:"timeout"` + Verbose types.Bool `tfsdk:"verbose"` + VerboseCredentials types.Bool `tfsdk:"verbose_credentials"` + VerboseFile types.String `tfsdk:"verbose_file"` 
+} + +func (p *Provider) Configure(ctx context.Context, req provider.ConfigureRequest, resp *provider.ConfigureResponse) { + if p.client != nil { + // Required for unit tests, because a mock client is pre-created there. + resp.DataSourceData = p.client + resp.ResourceData = p.client + return } + + var config providerConfig + + diags := req.Config.Get(ctx, &config) + + resp.Diagnostics.Append(diags...) + + if resp.Diagnostics.HasError() { + return + } + + endpoint := config.Endpoint.Value + + if config.Endpoint.Null || config.Endpoint.Value == "" { + endpoint = util.MultiGetenvOrDefault([]string{"EC_ENDPOINT", "EC_HOST"}, api.ESSEndpoint) + + diags := validateEndpoint(ctx, endpoint) + + resp.Diagnostics.Append(diags...) + + if diags.HasError() { + return + } + } + + apiKey := config.ApiKey.Value + + if config.ApiKey.Null || config.ApiKey.Value == "" { + apiKey = util.MultiGetenvOrDefault([]string{"EC_API_KEY"}, "") + } + + username := config.Username.Value + + if config.Username.Null || config.Username.Value == "" { + username = util.MultiGetenvOrDefault([]string{"EC_USER", "EC_USERNAME"}, "") + } + + password := config.Password.Value + + if config.Password.Null || config.Password.Value == "" { + password = util.MultiGetenvOrDefault([]string{"EC_PASS", "EC_PASSWORD"}, "") + } + + timeoutStr := config.Timeout.Value + + if config.Timeout.Null || config.Timeout.Value == "" { + timeoutStr = util.MultiGetenvOrDefault([]string{"EC_TIMEOUT"}, defaultTimeout.String()) + } + + timeout, err := time.ParseDuration(timeoutStr) + + if err != nil { + resp.Diagnostics.AddError("Unable to create client", err.Error()) + return + } + + insecure := config.Insecure.Value + + if config.Insecure.Null { + insecureStr := util.MultiGetenvOrDefault([]string{"EC_INSECURE", "EC_SKIP_TLS_VALIDATION"}, "") + + if insecure, err = util.StringToBool(insecureStr); err != nil { + resp.Diagnostics.AddError( + "Unable to create client", + fmt.Sprintf("Invalid value '%v' in 'EC_INSECURE' or 
'EC_SKIP_TLS_VALIDATION'", insecureStr), + ) + return + } + } + + verbose := config.Verbose.Value + + if config.Verbose.Null { + verboseStr := util.MultiGetenvOrDefault([]string{"EC_VERBOSE"}, "") + + if verbose, err = util.StringToBool(verboseStr); err != nil { + resp.Diagnostics.AddError( + "Unable to create client", + fmt.Sprintf("Invalid value '%v' in 'EC_VERBOSE'", verboseStr), + ) + return + } + } + + verboseCredentials := config.VerboseCredentials.Value + + if config.VerboseCredentials.Null { + verboseCredentialsStr := util.MultiGetenvOrDefault([]string{"EC_VERBOSE_CREDENTIALS"}, "") + + if verboseCredentials, err = util.StringToBool(verboseCredentialsStr); err != nil { + resp.Diagnostics.AddError( + "Unable to create client", + fmt.Sprintf("Invalid value '%v' in 'EC_VERBOSE_CREDENTIALS'", verboseCredentialsStr), + ) + return + } + } + + verboseFile := config.VerboseFile.Value + + if config.VerboseFile.Null { + verboseFile = util.MultiGetenvOrDefault([]string{"EC_VERBOSE_FILE"}, "request.log") + } + + cfg, err := newAPIConfig(apiSetup{ + endpoint: endpoint, + apikey: apiKey, + username: username, + password: password, + insecure: insecure, + timeout: timeout, + verbose: verbose, + verboseCredentials: verboseCredentials, + verboseFile: verboseFile, + }) + + if err != nil { + resp.Diagnostics.AddError( + "Unable to create api Client config", + err.Error(), + ) + return + } + + client, err := api.NewAPI(cfg) + + if err != nil { + resp.Diagnostics.AddError( + "Unable to create api Client config", + err.Error(), + ) + return + } + + p.client = client + resp.DataSourceData = client + resp.ResourceData = client +} + +func validateEndpoint(ctx context.Context, endpoint string) diag.Diagnostics { + validateReq := tfsdk.ValidateAttributeRequest{ + AttributePath: path.Root("endpoint"), + AttributeConfig: types.String{Value: endpoint}, + } + + validateResp := tfsdk.ValidateAttributeResponse{} + + validators.IsURLWithSchemeValidator(validURLSchemes).Validate(ctx, 
validateReq, &validateResp) + + return validateResp.Diagnostics } diff --git a/ec/provider_config.go b/ec/provider_config.go index f1a3ca8be..cd4d5007a 100644 --- a/ec/provider_config.go +++ b/ec/provider_config.go @@ -18,7 +18,6 @@ package ec import ( - "context" "fmt" "net/http" "os" @@ -26,8 +25,6 @@ import ( "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" - "github.com/hashicorp/terraform-plugin-sdk/v2/diag" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" ) const ( @@ -39,42 +36,35 @@ var ( DefaultHTTPRetries = 2 ) -// configureAPI implements schema.ConfigureContextFunc -func configureAPI(_ context.Context, d *schema.ResourceData) (interface{}, diag.Diagnostics) { - cfg, err := newAPIConfig(d) - if err != nil { - return nil, diag.FromErr(err) - } - - client, err := api.NewAPI(cfg) - if err != nil { - return nil, diag.FromErr(err) - } - - return client, nil +type apiSetup struct { + endpoint string + apikey string + username string + password string + insecure bool + timeout time.Duration + verbose bool + verboseCredentials bool + verboseFile string } -func newAPIConfig(d *schema.ResourceData) (api.Config, error) { - var cfg api.Config +func newAPIConfig(setup apiSetup) (api.Config, error) { - timeout, err := time.ParseDuration(d.Get("timeout").(string)) - if err != nil { - return cfg, err - } + var cfg api.Config authWriter, err := auth.NewAuthWriter(auth.Config{ - APIKey: d.Get("apikey").(string), - Username: d.Get("username").(string), - Password: d.Get("password").(string), + APIKey: setup.apikey, + Username: setup.username, + Password: setup.password, }) if err != nil { return cfg, err } verboseCfg, err := verboseSettings( - d.Get("verbose_file").(string), - d.Get("verbose").(bool), - !d.Get("verbose_credentials").(bool), + setup.verboseFile, + setup.verbose, + !setup.verboseCredentials, ) if err != nil { return cfg, err @@ -85,9 +75,9 @@ func newAPIConfig(d *schema.ResourceData) (api.Config, error) { 
Client: &http.Client{}, VerboseSettings: verboseCfg, AuthWriter: authWriter, - Host: d.Get("endpoint").(string), - SkipTLSVerify: d.Get("insecure").(bool), - Timeout: timeout, + Host: setup.endpoint, + SkipTLSVerify: setup.insecure, + Timeout: setup.timeout, UserAgent: userAgent(Version), Retries: DefaultHTTPRetries, }, nil diff --git a/ec/provider_config_test.go b/ec/provider_config_test.go index cda457e90..10d14760a 100644 --- a/ec/provider_config_test.go +++ b/ec/provider_config_test.go @@ -26,13 +26,11 @@ import ( "syscall" "testing" + "github.com/stretchr/testify/assert" + "github.com/elastic/cloud-sdk-go/pkg/api" "github.com/elastic/cloud-sdk-go/pkg/auth" "github.com/elastic/cloud-sdk-go/pkg/multierror" - "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" - "github.com/stretchr/testify/assert" - - "github.com/elastic/terraform-provider-ec/ec/internal/util" ) func Test_verboseSettings(t *testing.T) { @@ -101,61 +99,14 @@ func Test_verboseSettings(t *testing.T) { } func Test_newAPIConfig(t *testing.T) { - defer unsetECAPIKey(t)() + apiKeyObj := auth.APIKey("secret") - defaultCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{}, - }) - invalidTimeoutCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "timeout": "invalid", - }, - }) - - apiKeyCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - }, - }) - apiKeyObj := auth.APIKey("blih") - - userPassCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "username": "my-user", - "password": "my-pass", - }, - }) userPassObj := auth.UserLogin{ Username: "my-user", Password: "my-pass", Holder: new(auth.GenericHolder), } - insecureCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - 
Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - "insecure": true, - }, - }) - - verboseCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - "verbose": true, - }, - }) defer func() { os.Remove("request.log") }() @@ -164,43 +115,18 @@ func Test_newAPIConfig(t *testing.T) { if err != nil { t.Fatal(err) } + defer func() { customFile.Close() os.Remove(customFile.Name()) }() - verboseCustomFileCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - "verbose": true, - "verbose_file": customFile.Name(), - }, - }) - verboseAndCredsCustomFileCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - "verbose": true, - "verbose_file": customFile.Name(), - "verbose_credentials": true, - }, - }) + invalidPath := filepath.Join("a", "b", "c", "d", "e", "f", "g", "h", "invalid!") - verboseInvalidFileCfg := util.NewResourceData(t, util.ResDataParams{ - ID: "whocares", - Schema: newSchema(), - State: map[string]interface{}{ - "apikey": "blih", - "verbose": true, - "verbose_file": invalidPath, - "verbose_credentials": true, - }, - }) + type args struct { - d *schema.ResourceData + apiSetup apiSetup } + tests := []struct { name string args args @@ -210,19 +136,25 @@ func Test_newAPIConfig(t *testing.T) { }{ { name: "default config returns with authwriter error", - args: args{d: defaultCfg}, + args: args{ + apiSetup: apiSetup{ + timeout: defaultTimeout, + }, + }, err: multierror.NewPrefixed("authwriter", errors.New("one of apikey or username and password must be specified"), ), }, - { - name: "default config with invalid timeout returns with authwriter error", - args: args{d: invalidTimeoutCfg}, - err: errors.New(`time: invalid duration "invalid"`), - }, + { name: "custom config with apikey 
auth succeeds", - args: args{d: apiKeyCfg}, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + timeout: defaultTimeout, + endpoint: api.ESSEndpoint, + }, + }, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), ErrorDevice: os.Stdout, @@ -233,9 +165,17 @@ func Test_newAPIConfig(t *testing.T) { Retries: DefaultHTTPRetries, }, }, + { name: "custom config with username/password auth succeeds", - args: args{d: userPassCfg}, + args: args{ + apiSetup: apiSetup{ + username: "my-user", + password: "my-pass", + timeout: defaultTimeout, + endpoint: api.ESSEndpoint, + }, + }, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), ErrorDevice: os.Stdout, @@ -246,23 +186,17 @@ func Test_newAPIConfig(t *testing.T) { Retries: DefaultHTTPRetries, }, }, + { name: "custom config with insecure succeeds", - args: args{d: insecureCfg}, - want: api.Config{ - UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), - ErrorDevice: os.Stdout, - Host: api.ESSEndpoint, - AuthWriter: &apiKeyObj, - Client: &http.Client{}, - Timeout: defaultTimeout, - Retries: DefaultHTTPRetries, - SkipTLSVerify: true, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + insecure: true, + timeout: defaultTimeout, + endpoint: api.ESSEndpoint, + }, }, - }, - { - name: "custom config with insecure succeeds", - args: args{d: insecureCfg}, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), ErrorDevice: os.Stdout, @@ -274,9 +208,18 @@ func Test_newAPIConfig(t *testing.T) { SkipTLSVerify: true, }, }, + { name: "custom config with verbose (default file) succeeds", - args: args{d: verboseCfg}, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + verbose: true, + verboseFile: "request.log", + timeout: defaultTimeout, + endpoint: api.ESSEndpoint, + }, + }, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), 
ErrorDevice: os.Stdout, @@ -292,9 +235,18 @@ func Test_newAPIConfig(t *testing.T) { }, wantFileName: "request.log", }, + { name: "custom config with verbose (custom file) succeeds", - args: args{d: verboseCustomFileCfg}, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + verbose: true, + verboseFile: customFile.Name(), + timeout: defaultTimeout, + endpoint: api.ESSEndpoint, + }, + }, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), ErrorDevice: os.Stdout, @@ -310,9 +262,19 @@ func Test_newAPIConfig(t *testing.T) { }, wantFileName: filepath.Base(customFile.Name()), }, + { name: "custom config with verbose and verbose_credentials (custom file) succeeds", - args: args{d: verboseAndCredsCustomFileCfg}, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + verbose: true, + verboseFile: customFile.Name(), + verboseCredentials: true, + timeout: defaultTimeout, + endpoint: api.ESSEndpoint, + }, + }, want: api.Config{ UserAgent: fmt.Sprintf(providerUserAgentFmt, Version, api.DefaultUserAgent), ErrorDevice: os.Stdout, @@ -328,9 +290,18 @@ func Test_newAPIConfig(t *testing.T) { }, wantFileName: filepath.Base(customFile.Name()), }, + { name: "custom config with verbose and verbose_credentials (invalid file) fails ", - args: args{d: verboseInvalidFileCfg}, + args: args{ + apiSetup: apiSetup{ + apikey: "secret", + verbose: true, + verboseFile: invalidPath, + verboseCredentials: true, + timeout: defaultTimeout, + }, + }, err: fmt.Errorf(`failed creating verbose file "%s": %w`, invalidPath, &os.PathError{ @@ -343,7 +314,7 @@ func Test_newAPIConfig(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := newAPIConfig(tt.args.d) + got, err := newAPIConfig(tt.args.apiSetup) assert.Equal(t, tt.err, err) if got.Verbose && err == nil { @@ -358,20 +329,3 @@ func Test_newAPIConfig(t *testing.T) { }) } } - -func unsetECAPIKey(t *testing.T) func() { - t.Helper() - // This is necessary to avoid any 
EC_API_KEY which might be set to cause - // test flakyness. - if k := os.Getenv("EC_API_KEY"); k != "" { - if err := os.Unsetenv("EC_API_KEY"); err != nil { - t.Fatal(err) - } - return func() { - if err := os.Setenv("EC_API_KEY", k); err != nil { - t.Fatal(err) - } - } - } - return func() {} -} diff --git a/ec/provider_test.go b/ec/provider_test.go new file mode 100644 index 000000000..50192cfc5 --- /dev/null +++ b/ec/provider_test.go @@ -0,0 +1,193 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package ec + +import ( + "context" + "testing" + + "github.com/elastic/terraform-provider-ec/ec/internal/util" + "github.com/hashicorp/terraform-plugin-framework/diag" + "github.com/hashicorp/terraform-plugin-framework/path" + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/tfsdk" + "github.com/hashicorp/terraform-plugin-framework/types" + "github.com/stretchr/testify/assert" +) + +func Test_Configure(t *testing.T) { + type args struct { + env map[string]string + config providerConfig + } + + tests := []struct { + name string + args args + diags diag.Diagnostics + }{ + { + name: `provider config doesn't define "endpoint" and "EC_ENDPOINT" is defined and invalid`, + args: args{ + env: map[string]string{ + "EC_ENDPOINT": "invalid", + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddAttributeError(path.Root("endpoint"), "Value must be a valid URL with scheme (http, https)", "URL is missing host, got invalid") + return diags + }(), + }, + + { + name: `provider config and env vars don't define either api key or user login/passwords`, + args: args{ + env: map[string]string{ + "EC_ENDPOINT": "https://cloud.elastic.co/api", + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Unable to create api Client config", "authwriter: 1 error occurred:\n\t* one of apikey or username and password must be specified\n\n") + return diags + }(), + }, + + { + name: `provider config doesn't define "insecure" and "EC_INSECURE" contains invalid value`, + args: args{ + env: map[string]string{ + "EC_INSECURE": "invalid", + }, + config: providerConfig{ + Endpoint: types.String{Value: "https://cloud.elastic.co/api"}, + ApiKey: types.String{Value: "secret"}, + Insecure: types.Bool{Null: true}, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Unable to create client", "Invalid value 'invalid' in 'EC_INSECURE' or 
'EC_SKIP_TLS_VALIDATION'") + return diags + }(), + }, + + { + name: `provider config doesn't define "verbose" and "EC_VERBOSE" contains invalid value`, + args: args{ + env: map[string]string{ + "EC_VERBOSE": "invalid", + }, + config: providerConfig{ + Endpoint: types.String{Value: "https://cloud.elastic.co/api"}, + ApiKey: types.String{Value: "secret"}, + Verbose: types.Bool{Null: true}, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Unable to create client", "Invalid value 'invalid' in 'EC_VERBOSE'") + return diags + }(), + }, + + { + name: `provider config doesn't define "verbose" and "EC_VERBOSE_CREDENTIALS" contains invalid value`, + args: args{ + env: map[string]string{ + "EC_VERBOSE_CREDENTIALS": "invalid", + }, + config: providerConfig{ + Endpoint: types.String{Value: "https://cloud.elastic.co/api"}, + ApiKey: types.String{Value: "secret"}, + VerboseCredentials: types.Bool{Null: true}, + }, + }, + diags: func() diag.Diagnostics { + var diags diag.Diagnostics + diags.AddError("Unable to create client", "Invalid value 'invalid' in 'EC_VERBOSE_CREDENTIALS'") + return diags + }(), + }, + + { + name: `provider config is read from environment variables`, + args: args{ + env: map[string]string{ + "EC_ENDPOINT": "https://cloud.elastic.co/api", + "EC_API_KEY": "secret", + "EC_INSECURE": "true", + "EC_TIMEOUT": "1m", + "EC_VERBOSE": "true", + "EC_VERBOSE_CREDENTIALS": "true", + "EC_VERBOSE_FILE": "requests.log", + }, + config: providerConfig{ + Endpoint: types.String{Null: true}, + ApiKey: types.String{Null: true}, + Insecure: types.Bool{Null: true}, + Timeout: types.String{Null: true}, + Verbose: types.Bool{Null: true}, + VerboseCredentials: types.Bool{Null: true}, + VerboseFile: types.String{Null: true}, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var p Provider + + schema, diags := p.GetSchema(context.Background()) + + assert.Nil(t, diags) + + resp := 
provider.ConfigureResponse{} + + util.GetEnv = func(key string) string { + return tt.args.env[key] + } + + var config types.Object + + diags = tfsdk.ValueFrom(context.Background(), &tt.args.config, schema.Type(), &config) + + assert.Nil(t, diags) + + rawConfig, err := config.ToTerraformValue(context.Background()) + + assert.Nil(t, err) + + p.Configure( + context.Background(), + provider.ConfigureRequest{ + Config: tfsdk.Config{Schema: schema, Raw: rawConfig}, + }, + &resp, + ) + + if tt.diags != nil { + assert.Equal(t, tt.diags, resp.Diagnostics) + } else { + assert.Nil(t, resp.Diagnostics) + } + }) + } +} diff --git a/ec/version.go b/ec/version.go index e00612ef5..eb125514b 100644 --- a/ec/version.go +++ b/ec/version.go @@ -18,4 +18,4 @@ package ec // Version contains the current terraform provider version. -const Version = "0.5.0-dev" +const Version = "0.6.0-dev" diff --git a/examples/deployment/deployment.tf b/examples/deployment/deployment.tf index e9395b99e..de0c178a7 100644 --- a/examples/deployment/deployment.tf +++ b/examples/deployment/deployment.tf @@ -1,7 +1,7 @@ terraform { # The Elastic Cloud provider is supported from ">=0.12" # Version later than 0.12.29 is required for this terraform block to work. 
- required_version = ">= 0.12.29" + required_version = ">= 1.0" required_providers { ec = { @@ -28,23 +28,25 @@ resource "ec_deployment" "example_minimal" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { - config { + elasticsearch = { + + topology = { + "hot_content" = { + autoscaling = {} + } + } + config = { user_settings_yaml = file("./es_settings.yaml") } } - kibana {} + kibana = {} - enterprise_search { - topology { - zone_count = 1 - } + enterprise_search = { + zone_count = 1 } - apm { - topology { - size = "0.5g" - } + apm = { + size = "0.5g" } } \ No newline at end of file diff --git a/examples/deployment/outputs.tf b/examples/deployment/outputs.tf index 0d63eb92d..5fd329cf1 100644 --- a/examples/deployment/outputs.tf +++ b/examples/deployment/outputs.tf @@ -3,11 +3,11 @@ output "elasticsearch_version" { } output "elasticsearch_cloud_id" { - value = ec_deployment.example_minimal.elasticsearch[0].cloud_id + value = ec_deployment.example_minimal.elasticsearch.cloud_id } output "elasticsearch_https_endpoint" { - value = ec_deployment.example_minimal.elasticsearch[0].https_endpoint + value = ec_deployment.example_minimal.elasticsearch.https_endpoint } output "elasticsearch_username" { diff --git a/examples/deployment_ccs/deployment.tf b/examples/deployment_ccs/deployment.tf index e53bebc39..e71fe4023 100644 --- a/examples/deployment_ccs/deployment.tf +++ b/examples/deployment_ccs/deployment.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 0.12.29" + required_version = ">= 1.0" required_providers { ec = { @@ -24,11 +24,13 @@ resource "ec_deployment" "source_deployment" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { - topology { - id = "hot_content" - zone_count = 1 - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + zone_count = 1 + size = "2g" + autoscaling = {} + } } } } @@ -40,11 +42,13 @@ resource "ec_deployment" 
"second_source" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" - elasticsearch { - topology { - id = "hot_content" - zone_count = 1 - size = "2g" + elasticsearch = { + topology = { + "hot_content" = { + zone_count = 1 + size = "2g" + autoscaling = {} + } } } } @@ -56,19 +60,26 @@ resource "ec_deployment" "ccs" { version = data.ec_stack.latest.version deployment_template_id = "aws-cross-cluster-search-v2" - elasticsearch { - remote_cluster { - deployment_id = ec_deployment.source_deployment.id - alias = ec_deployment.source_deployment.name - ref_id = ec_deployment.source_deployment.elasticsearch.0.ref_id + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } } - remote_cluster { - deployment_id = ec_deployment.second_source.id - alias = ec_deployment.second_source.name - ref_id = ec_deployment.second_source.elasticsearch.0.ref_id - } + remote_cluster = [ + { + deployment_id = ec_deployment.source_deployment.id + alias = ec_deployment.source_deployment.name + ref_id = ec_deployment.source_deployment.elasticsearch.0.ref_id + }, + { + deployment_id = ec_deployment.second_source.id + alias = ec_deployment.second_source.name + ref_id = ec_deployment.second_source.elasticsearch.0.ref_id + } + ] } - kibana {} + kibana = {} } diff --git a/examples/deployment_ec2_instance/elastic_deployment.tf b/examples/deployment_ec2_instance/elastic_deployment.tf index f6f9073b2..d1d0221f0 100644 --- a/examples/deployment_ec2_instance/elastic_deployment.tf +++ b/examples/deployment_ec2_instance/elastic_deployment.tf @@ -16,8 +16,15 @@ resource "ec_deployment" "deployment" { traffic_filter = [ec_deployment_traffic_filter.allow_my_instance.id] # Note the deployment will contain Elasticsearch and Kibana resources with default configurations. 
- elasticsearch {} - kibana {} + elasticsearch = { + topology = { + "hot_content" = { + autoscaling = {} + } + } + } + + kibana = {} } # Create a traffic filter to allow the instance's public IP address to access our deployment. diff --git a/examples/deployment_with_init/deployment.tf b/examples/deployment_with_init/deployment.tf index 28892498e..7300525c5 100644 --- a/examples/deployment_with_init/deployment.tf +++ b/examples/deployment_with_init/deployment.tf @@ -14,17 +14,17 @@ resource "ec_deployment" "example_minimal" { version = data.ec_stack.latest.version deployment_template_id = "aws-io-optimized-v2" traffic_filter = [ec_deployment_traffic_filter.allow_all.id] - elasticsearch { - topology { - id = "hot_content" - size = "8g" + elasticsearch = { + topology = { + "hot_content" = { + size = "8g" + autoscaling = {} + } } } - kibana { - topology { - size = "1g" - } + kibana = { + size = "1g" } } diff --git a/examples/deployment_with_init/main.tf b/examples/deployment_with_init/main.tf index b381e5491..06597b49c 100644 --- a/examples/deployment_with_init/main.tf +++ b/examples/deployment_with_init/main.tf @@ -1,16 +1,10 @@ resource "null_resource" "bootstrap-elasticsearch" { provisioner "local-exec" { - command = data.template_file.elasticsearch-configuration.rendered - } -} - -data "template_file" "elasticsearch-configuration" { - template = file("es_config.sh") - depends_on = [ec_deployment.example_minimal] - vars = { # Created servers and appropriate AZs - elastic-user = ec_deployment.example_minimal.elasticsearch_username - elastic-password = ec_deployment.example_minimal.elasticsearch_password - es-url = ec_deployment.example_minimal.elasticsearch[0].https_endpoint + command = templatefile("es_config.sh", { + elastic-user = ec_deployment.example_minimal.elasticsearch_username + elastic-password = ec_deployment.example_minimal.elasticsearch_password + es-url = ec_deployment.example_minimal.elasticsearch[0].https_endpoint + }) } -} \ No newline at end of file 
+} diff --git a/examples/deployment_with_init/provider.tf b/examples/deployment_with_init/provider.tf index 2ad127369..485098007 100644 --- a/examples/deployment_with_init/provider.tf +++ b/examples/deployment_with_init/provider.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 0.12.29" + required_version = ">= 1.0" required_providers { ec = { diff --git a/examples/extension_bundle/extension.tf b/examples/extension_bundle/extension.tf index e7fe767e7..71047427b 100644 --- a/examples/extension_bundle/extension.tf +++ b/examples/extension_bundle/extension.tf @@ -1,5 +1,5 @@ terraform { - required_version = ">= 0.12.29" + required_version = ">= 1.0" required_providers { ec = { diff --git a/gen/gen.go b/gen/gen.go index 68f6ae362..b533af753 100644 --- a/gen/gen.go +++ b/gen/gen.go @@ -17,6 +17,7 @@ // This program generates ec/version.go. It can be invoked by running // make generate +//go:build ignore // +build ignore package main diff --git a/go.mod b/go.mod index c7cfb7087..72d8cfa11 100644 --- a/go.mod +++ b/go.mod @@ -3,40 +3,42 @@ module github.com/elastic/terraform-provider-ec go 1.19 require ( + github.com/blang/semver v3.5.1+incompatible github.com/blang/semver/v4 v4.0.0 github.com/elastic/cloud-sdk-go v1.10.0 - github.com/go-openapi/runtime v0.25.0 + github.com/go-openapi/runtime v0.24.2 github.com/go-openapi/strfmt v0.21.3 + github.com/hashicorp/terraform-plugin-framework v0.14.0 + github.com/hashicorp/terraform-plugin-framework-validators v0.5.0 + github.com/hashicorp/terraform-plugin-go v0.14.1 + github.com/hashicorp/terraform-plugin-log v0.7.0 github.com/hashicorp/terraform-plugin-sdk/v2 v2.24.1 github.com/stretchr/testify v1.8.1 + golang.org/x/exp v0.0.0-20221012211006-4de253d81b95 ) require ( - github.com/PuerkitoBio/purell v1.1.1 // indirect - github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect - github.com/agext/levenshtein v1.2.2 // indirect + github.com/agext/levenshtein v1.2.3 // indirect 
github.com/apparentlymart/go-cidr v1.1.0 // indirect github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/fatih/color v1.13.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/analysis v0.21.2 // indirect - github.com/go-openapi/errors v0.20.2 // indirect + github.com/go-openapi/analysis v0.21.4 // indirect + github.com/go-openapi/errors v0.20.3 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.19.6 // indirect - github.com/go-openapi/loads v0.21.1 // indirect - github.com/go-openapi/spec v0.20.4 // indirect - github.com/go-openapi/swag v0.21.1 // indirect - github.com/go-openapi/validate v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.20.0 // indirect + github.com/go-openapi/loads v0.21.2 // indirect + github.com/go-openapi/spec v0.20.7 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/validate v0.22.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/google/go-cmp v0.5.9 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-checkpoint v0.5.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 // indirect - github.com/hashicorp/go-hclog v1.2.1 // indirect + github.com/hashicorp/go-hclog v1.3.1 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/go-plugin v1.4.6 // indirect github.com/hashicorp/go-uuid v1.0.3 // indirect @@ -46,36 +48,32 @@ require ( github.com/hashicorp/logutils v1.0.0 // indirect github.com/hashicorp/terraform-exec v0.17.3 // indirect github.com/hashicorp/terraform-json v0.14.0 // indirect - github.com/hashicorp/terraform-plugin-go v0.14.1 // indirect - 
github.com/hashicorp/terraform-plugin-log v0.7.0 // indirect github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c // indirect github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 // indirect - github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d // indirect + github.com/hashicorp/yamux v0.1.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-testing-interface v1.14.1 // indirect - github.com/mitchellh/go-wordwrap v1.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect - github.com/oklog/run v1.0.0 // indirect + github.com/oklog/run v1.1.0 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect github.com/vmihailenco/msgpack/v4 v4.3.12 // indirect - github.com/vmihailenco/tagparser v0.1.1 // indirect + github.com/vmihailenco/tagparser v0.1.2 // indirect github.com/zclconf/go-cty v1.12.1 // indirect go.mongodb.org/mongo-driver v1.10.0 // indirect - go.opentelemetry.io/otel v1.11.1 // indirect - go.opentelemetry.io/otel/trace v1.11.1 // indirect golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d // indirect - golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd // indirect - golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 // indirect + golang.org/x/net v0.0.0-20220708220712-1185a9018129 // indirect + golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab // indirect golang.org/x/text v0.3.7 // 
indirect - google.golang.org/appengine v1.6.6 // indirect + google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20200711021454-869866162049 // indirect google.golang.org/grpc v1.50.1 // indirect google.golang.org/protobuf v1.28.1 // indirect diff --git a/go.sum b/go.sum index 1bc9b58c4..a175ed3dd 100644 --- a/go.sum +++ b/go.sum @@ -7,14 +7,12 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/agext/levenshtein v1.2.2 h1:0S/Yg6LYmFJ5stwQeRp6EeOcCbj7xiqQSdNelsXvaqE= -github.com/agext/levenshtein v1.2.2/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/anmitsu/go-shlex 
v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= @@ -34,6 +32,8 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -66,11 +66,6 @@ github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod 
h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= @@ -82,8 +77,9 @@ github.com/go-openapi/analysis v0.19.16/go.mod h1:GLInF007N83Ad3m8a/CbQ5TPzdnGT7 github.com/go-openapi/analysis v0.20.0/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= github.com/go-openapi/analysis v0.20.1/go.mod h1:BMchjvaHDykmRMsK40iPtvyOfFdMMxlOmQr9FBZk+Og= github.com/go-openapi/analysis v0.21.1/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/analysis v0.21.2 h1:hXFrOYFHUAMQdu6zwAiKKJHJQ8kqZs1ux/ru1P1wLJU= github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= +github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= +github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= @@ -93,8 +89,9 @@ github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpX github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8= github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= +github.com/go-openapi/errors v0.20.3 h1:rz6kiC84sqNQoqrtulzaL/VERgkoCyB6WdEkc2ujzUc= +github.com/go-openapi/errors v0.20.3/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= 
github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= @@ -106,8 +103,9 @@ github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3Hfo github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= -github.com/go-openapi/jsonreference v0.19.6 h1:UBIxjkht+AWIgYzCDSv2GN+E/togfwXUJFRTWhl2Jjs= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= +github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= @@ -119,8 +117,9 @@ github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hs github.com/go-openapi/loads v0.20.0/go.mod h1:2LhKquiE513rN5xC6Aan6lYOSddlL8Mp20AW9kpviM4= github.com/go-openapi/loads v0.20.2/go.mod h1:hTVUotJ+UonAMMZsvakEgmWKgtulweO9vYP2bQYKA/o= github.com/go-openapi/loads v0.21.0/go.mod h1:rHYve9nZrQ4CJhyeIIFJINGCg1tQpx2yJrrNo8sf1ws= -github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlDW0= github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= +github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= +github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= 
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= @@ -128,8 +127,8 @@ github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2g github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= github.com/go-openapi/runtime v0.23.0/go.mod h1:aQg+kaIQEn+A2CRSY1TxbM8+sT9g2V3aLc1FbIAnbbs= -github.com/go-openapi/runtime v0.25.0 h1:7yQTCdRbWhX8vnIjdzU8S00tBYf7Sg71EBeorlPHvhc= -github.com/go-openapi/runtime v0.25.0/go.mod h1:Ux6fikcHXyyob6LNWxtE96hWwjBPYF0DXgVFuMTneOs= +github.com/go-openapi/runtime v0.24.2 h1:yX9HMGQbz32M87ECaAhGpJjBmErO3QLcgdZj9BzGx7c= +github.com/go-openapi/runtime v0.24.2/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= @@ -140,8 +139,10 @@ github.com/go-openapi/spec v0.19.15/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFu github.com/go-openapi/spec v0.20.0/go.mod h1:+81FIL1JwC5P3/Iuuozq3pPE9dXdIEGxFutcFKaVbmU= github.com/go-openapi/spec v0.20.1/go.mod h1:93x7oh+d+FQsmsieroS4cmR3u0p/ywH649a3qwC9OsQ= github.com/go-openapi/spec v0.20.3/go.mod h1:gG4F8wdEDN+YPBMVnzE85Rbhf+Th2DTvA9nFPQ5AYEg= -github.com/go-openapi/spec v0.20.4 h1:O8hJrt0UMnhHcluhIdUgCLRWyM2x7QkBXRvOs7m+O1M= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= +github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.7 
h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI= +github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= @@ -167,8 +168,9 @@ github.com/go-openapi/swag v0.19.12/go.mod h1:eFdyEBkTdoAf/9RXBvj4cr1nH7GD8Kzo5H github.com/go-openapi/swag v0.19.13/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1 h1:wm0rhTb5z7qpJRHBdPOMuY4QjVUMbF6/kwoYeRAOrKU= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= @@ -177,8 +179,9 @@ github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0 github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= github.com/go-openapi/validate v0.20.1/go.mod h1:b60iJT+xNNLfaQJUqLI7946tYiFEOuE9E4k54HpKcJ0= github.com/go-openapi/validate v0.20.3/go.mod h1:goDdqVGiigM3jChcrYJxD2joalke3ZXeftD16byIjA4= -github.com/go-openapi/validate v0.21.0 h1:+Wqk39yKOhfpLqNLEC0/eViCkzM5FVXVqrvt526+wcI= github.com/go-openapi/validate v0.21.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= 
+github.com/go-openapi/validate v0.22.0 h1:b0QecH6VslW/TxtpKgzpO1SNG7GU2FsaqKdP1E2T50Y= +github.com/go-openapi/validate v0.22.0/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4= @@ -247,8 +250,8 @@ github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9n github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320 h1:1/D3zfFHttUKaCaGKZ/dR2roBXv0vKbSCnssIldfQdI= github.com/hashicorp/go-cty v1.4.1-0.20200414143053-d3edf31b6320/go.mod h1:EiZBMaudVLy8fmjf9Npq1dq9RalhveqZG5w/yz3mHWs= -github.com/hashicorp/go-hclog v1.2.1 h1:YQsLlGDJgwhXFpucSPyVbCBviQtjlHv3jLTlp8YmtEw= -github.com/hashicorp/go-hclog v1.2.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.3.1 h1:vDwF1DFNZhntP4DAjuTpOw3uEgMUpXh1pB5fW9DqHpo= +github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-plugin v1.4.6 h1:MDV3UrKQBM3du3G7MApDGvOsMYy3JQJ4exhSoKBAeVA= @@ -270,6 +273,10 @@ github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjl github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s= github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM= +github.com/hashicorp/terraform-plugin-framework v0.14.0 
h1:Mwj55u+Jc/QGM6fLBPCe1P+ZF3cuYs6wbCdB15lx/Dg= +github.com/hashicorp/terraform-plugin-framework v0.14.0/go.mod h1:wcZdk4+Uef6Ng+BiBJjGAcIPlIs5bhlEV/TA1k6Xkq8= +github.com/hashicorp/terraform-plugin-framework-validators v0.5.0 h1:eD79idhnJOBajkUMEbm0c8dOyOb/F49STbUEVojT6F4= +github.com/hashicorp/terraform-plugin-framework-validators v0.5.0/go.mod h1:NfGgclDM3FZqvNVppPKE2aHI1JAyT002ypPRya7ch3I= github.com/hashicorp/terraform-plugin-go v0.14.1 h1:cwZzPYla82XwAqpLhSzdVsOMU+6H29tczAwrB0z9Zek= github.com/hashicorp/terraform-plugin-go v0.14.1/go.mod h1:Bc/K6K26BQ2FHqIELPbpKtt2CzzbQou+0UQF3/0NsCQ= github.com/hashicorp/terraform-plugin-log v0.7.0 h1:SDxJUyT8TwN4l5b5/VkiTIaQgY6R+Y2BQ0sRZftGKQs= @@ -280,8 +287,8 @@ github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b57 github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c/go.mod h1:Wn3Na71knbXc1G8Lh+yu/dQWWJeFQEpDeJMtWMtlmNI= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734 h1:HKLsbzeOsfXmKNpr3GiT18XAblV0BjCbzL8KQAMZGa0= github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734/go.mod h1:kNDNcF7sN4DocDLBkQYz73HGKwN1ANB1blq4lIYLYvg= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ= -github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hashicorp/yamux v0.1.1 h1:yrQxtgseBDrq9Y652vSRDvsKCJKOUD+GzTS4Y0Y8pvE= +github.com/hashicorp/yamux v0.1.1/go.mod h1:CtWFDAQgb7dxtzFs4tWbplKIe2jSi3+5vKbgIO0SLnQ= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= @@ -325,24 +332,27 @@ github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsI github.com/markbates/safe v1.0.1/go.mod 
h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= -github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure 
v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= @@ -350,8 +360,8 @@ github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce h1:RPclfga2SEJmgMmz2k+Mg7cowZ8yv4Trqw9UsJby758= -github.com/oklog/run v1.0.0 h1:Ru7dDtJNOyC66gQ5dQmaCa0qIsAUFY3sFpK1Xk8igrw= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= @@ -403,8 +413,9 @@ github.com/vmihailenco/msgpack v4.0.4+incompatible h1:dSLoQfGFAo3F6OoNhwUmLwVgaU github.com/vmihailenco/msgpack v4.0.4+incompatible/go.mod 
h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1 h1:quXMXlA39OCbd2wAdTsGDlK9RkOk6Wuw+x37wVyIuWY= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc= +github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= @@ -432,13 +443,9 @@ go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= go.mongodb.org/mongo-driver v1.8.2/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= +go.mongodb.org/mongo-driver v1.8.3/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= go.mongodb.org/mongo-driver v1.10.0 h1:UtV6N5k14upNp4LTduX0QCufG124fSu25Wz9tu94GLg= go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.opentelemetry.io/otel v1.11.1 h1:4WLLAmcfkmDk2ukNXJyq3/kiz/3UzCaYq6PskJsaou4= -go.opentelemetry.io/otel v1.11.1/go.mod h1:1nNhXBbWSD0nsL38H6btgnFN2k4i0sNLHNNMZMSbUGE= -go.opentelemetry.io/otel/sdk v1.11.1 h1:F7KmQgoHljhUuJyA+9BiU+EkJfyX5nVVF4wyzWZpKxs= -go.opentelemetry.io/otel/trace v1.11.1 h1:ofxdnzsNrGBYXbP7t7zpUK281+go5rF7dvdIZXF8gdQ= -go.opentelemetry.io/otel/trace v1.11.1/go.mod h1:f/Q9G7vzk5u91PhbmKbg1Qn0rzH1LJ4vbPHFGkTPtOk= golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -456,6 +463,8 @@ golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d h1:sK3txAijHtOK88l68nt020reeT1ZdKLIYetKl95FzVY= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20221012211006-4de253d81b95 h1:sBdrWpxhGDdTAYNqbgBLAR+ULAPPhfgncLr1X0lyWtg= +golang.org/x/exp v0.0.0-20221012211006-4de253d81b95/go.mod h1:cyybsKvd6eL0RnXn6p/Grxp8F5bW7iYuBgsNCOHpMYE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -485,8 +494,9 @@ golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5o golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd h1:O7DYs+zxREGLKzKoMQrtrEacpb0ZVXA5rIwylE2Xchk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220708220712-1185a9018129 h1:vucSRfWwTsoXro7P+3Cjlr6flUMtzCwzlvkxEQtHHB0= 
+golang.org/x/net v0.0.0-20220708220712-1185a9018129/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -524,12 +534,14 @@ golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8 h1:h+EGohizhe9XlX18rfpa8k8RAc5XyaeamM+0VHRd4lc= -golang.org/x/sys v0.0.0-20220919091848-fb04ddd9f9c8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab h1:2QkjZIsXupsJbJIdSjjUOgWK3aEtzyuh2mPt3l/CkeU= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -555,8 +567,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= diff --git a/main.go b/main.go index 0744ef73c..832b96f9e 100644 --- a/main.go +++ b/main.go @@ -18,10 +18,14 @@ package main import ( + "context" "flag" + "log" "github.com/elastic/terraform-provider-ec/ec" - "github.com/hashicorp/terraform-plugin-sdk/v2/plugin" + + "github.com/hashicorp/terraform-plugin-framework/provider" + "github.com/hashicorp/terraform-plugin-framework/providerserver" ) //go:generate go run ./gen/gen.go @@ -30,13 +34,19 @@ import ( const ProviderAddr = "registry.terraform.io/elastic/ec" func main() { - var debugMode bool - flag.BoolVar(&debugMode, "debug", false, "set to true to run the provider with support for debuggers like delve") + var debug bool + + flag.BoolVar(&debug, "debug", false, "set to true to run the provider with 
support for debuggers like delve") flag.Parse() - plugin.Serve(&plugin.ServeOpts{ - ProviderFunc: ec.Provider, - Debug: debugMode, - ProviderAddr: ProviderAddr, - }) + opts := providerserver.ServeOpts{ + Address: ProviderAddr, + Debug: debug, + } + + err := providerserver.Serve(context.Background(), func() provider.Provider { return ec.New(ec.Version) }, opts) + + if err != nil { + log.Fatal(err) + } }